LeetCode update
This commit is contained in:
parent c881127f48
commit b03cc42571
@ -0,0 +1,213 @@
package com.markilue.leecode.backtrace;

import org.junit.Test;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.backtrace
 * @Author: markilue
 * @CreateTime: 2022-10-19 10:06
 * @Description: TODO LeetCode 491, Non-decreasing Subsequences:
 * Given an integer array nums, find and return all the different non-decreasing subsequences of the array
 * that contain at least two elements. You may return the answer in any order.
 * The array may contain duplicates; two equal integers also count as a non-decreasing sequence (a special case).
 * @Version: 1.0
 */
public class FindSubsequences {

    @Test
    public void test() {
        int[] nums = {4, 6, 7, 7};
        System.out.println(findSubsequences(nums));
    }

    @Test
    public void test1() {
        int[] nums = {4, 4, 3, 2, 1};
        System.out.println(findSubsequences(nums));
    }

    @Test
    public void test2() {
        int[] nums = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 1, 1, 1, 1};
        System.out.println(findSubsequences(nums));
        System.out.println(result.size());
    }

    List<List<Integer>> result = new ArrayList<>();
    List<Integer> cur = new ArrayList<>();

    /**
     * The non-decreasing-subsequence problem is just the duplicate-free subsequence problem
     * with an extra non-decreasing condition.
     *
     * @param nums
     * @return
     */
    public List<List<Integer>> findSubsequences(int[] nums) {
        // Switched to the accepted backtracking1; the original backtracking below is kept for
        // reference but is known to produce Wrong Answer (see the note on that method).
        // boolean[] used = new boolean[nums.length];
        // backtracking(nums, 0, used);
        backtracking1(nums, 0);
        return result;
    }

    // Wrong Answer: at first it was not obvious where the bug is (the output looks plausible),
    // but on test2 the trailing 1, 1, 1, 1 can never be added to an answer.
    public void backtracking(int[] nums, int start, boolean[] used) {
        if (start >= nums.length) {
            return;
        }
        for (int i = start; i < nums.length; i++) {
            if (i > start && nums[i] < nums[i - 1]) {
                return;
            }
            // The same number must not be reused within one tree level. TODO nums[i] == nums[i - 1]
            // no longer works here: the array cannot be sorted first, so equal numbers are not
            // necessarily adjacent.
            if (i > start && nums[i] == nums[i - 1] && used[i - 1] == true) {
                continue;
            }
            // Only add the element if the sequence stays non-decreasing
            if (cur.isEmpty() || cur.get(cur.size() - 1) <= nums[i]) {
                cur.add(nums[i]);
                used[i] = false;
            } else {
                continue;
            }
            if (cur.size() >= 2) {
                result.add(new ArrayList<>(cur));
            }
            backtracking(nums, i + 1, used);
            // Backtrack
            cur.remove(cur.size() - 1);
            used[i] = true;
        }
    }
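
    /*
     * Illustrative note (added, not part of the original solution): the two checks above are where
     * backtracking goes wrong. Because nums cannot be sorted, nums[i] < nums[i - 1] only means that
     * index i should be skipped, not that the whole level can be abandoned, so the early "return"
     * cuts off valid branches (for example, the trailing 1s in test2 never pair up with the leading 1).
     * Likewise the used[]-based duplicate check assumes equal values sit next to each other, which only
     * holds for sorted input. A per-level HashSet, as in backtracking1 below, handles both cases:
     *
     *     Set<Integer> level = new HashSet<>();
     *     for (int i = start; i < nums.length; i++) {
     *         if (!cur.isEmpty() && cur.get(cur.size() - 1) > nums[i]) continue; // keep non-decreasing
     *         if (!level.add(nums[i])) continue;                                 // skip duplicates in this level
     *         ...
     *     }
     */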

    // 代码随想录 backtracking: beats 88.08% of submissions on runtime and 98.92% on memory
    public void backtracking1(int[] nums, int start) {
        if (cur.size() > 1) {
            result.add(new ArrayList<>(cur));
            // Note: no return here, because answers are collected at every node of the tree, not only at the leaves
        }
        // Records which numbers have already been used in the current tree level
        HashSet<Integer> set = new HashSet<>();
        for (int i = start; i < nums.length; i++) {
            // The same number must not be reused within one tree level,
            // and an element is only added if the sequence stays non-decreasing
            if ((!cur.isEmpty() && cur.get(cur.size() - 1) > nums[i]) || set.contains(nums[i])) {
                continue;
            }
            set.add(nums[i]);
            cur.add(nums[i]);
            backtracking1(nums, i + 1);
            // Backtrack (the per-level set is deliberately not undone: it is recreated on every call)
            cur.remove(cur.size() - 1);
        }
    }

    // 代码随想录 backtracking with an array-based hash: the values are limited to [-100, 100],
    // so a plain int array can replace the HashSet.
    // Beats 61.89% on runtime and 28.23% on memory.
    public void backtracking2(int[] nums, int start) {
        if (cur.size() > 1) {
            result.add(new ArrayList<>(cur));
            // Note: no return here, because answers are collected at every node of the tree, not only at the leaves
        }
        // Records which numbers have already been used in the current tree level
        int[] used = new int[201];
        for (int i = start; i < nums.length; i++) {
            // The same number must not be reused within one tree level,
            // and an element is only added if the sequence stays non-decreasing
            if ((!cur.isEmpty() && cur.get(cur.size() - 1) > nums[i]) || used[nums[i] + 100] == 1) {
                continue;
            }
            used[nums[i] + 100] = 1; // mark the value as used in this level so it cannot be taken again
            cur.add(nums[i]);
            backtracking2(nums, i + 1);
            // Backtrack
            cur.remove(cur.size() - 1);
        }
    }

    /**
     * Official solution: binary (bitmask) enumeration + hashing
     */
    List<Integer> temp = new ArrayList<Integer>();
    List<List<Integer>> ans = new ArrayList<List<Integer>>();
    Set<Integer> set = new HashSet<Integer>();
    int n;

    public List<List<Integer>> findSubsequences2(int[] nums) {
        n = nums.length;
        for (int i = 0; i < (1 << n); ++i) {
            findSubsequences(i, nums);
            int hashValue = getHash(263, (int) 1E9 + 7);
            // Deduplicate via the hash value: only keep a subsequence whose hash has not been seen before
            if (check() && !set.contains(hashValue)) {
                ans.add(new ArrayList<Integer>(temp));
                set.add(hashValue);
            }
        }
        return ans;
    }

    public void findSubsequences(int mask, int[] nums) {
        temp.clear();
        for (int i = 0; i < n; ++i) {
            if ((mask & 1) != 0) {
                temp.add(nums[i]);
            }
            mask >>= 1;
        }
    }
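
    // Illustrative test (added, not in the original): mask 0b101 selects indices 0 and 2,
    // so for nums = {4, 6, 7} the helper above should leave temp == [4, 7].
    @Test
    public void testMaskSelection() {
        int[] nums = {4, 6, 7};
        n = nums.length;
        findSubsequences(0b101, nums);
        System.out.println(temp); // expected: [4, 7]
    }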

    public int getHash(int base, int mod) {
        int hashValue = 0;
        for (int x : temp) {
            hashValue = hashValue * base % mod + (x + 101);
            hashValue %= mod;
        }
        return hashValue;
    }

    public boolean check() {
        for (int i = 1; i < temp.size(); ++i) {
            if (temp.get(i) < temp.get(i - 1)) {
                return false;
            }
        }
        return temp.size() >= 2;
    }

}
@ -0,0 +1,98 @@
package com.markilue.leecode.backtrace;

import org.junit.Test;

import java.util.ArrayList;
import java.util.List;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.backtrace
 * @Author: markilue
 * @CreateTime: 2022-10-19 11:16
 * @Description:
 * TODO LeetCode 46, Permutations:
 * Given an array nums of distinct integers, return all the possible permutations. You may return the answer in any order.
 * @Version: 1.0
 */
public class Permute {

    @Test
    public void test() {
        int[] nums = {1, 2, 3};
        System.out.println(permute(nums));
    }

    @Test
    public void test1() {
        int[] nums = {0, 1};
        System.out.println(permute(nums));
    }

    @Test
    public void test2() {
        int[] nums = {0};
        System.out.println(permute(nums));
    }

    /**
     * My own approach: a number can be appended to the current permutation as long as it is not already in it.
     * Beats 81.77% on runtime and 22.57% on memory.
     * @param nums
     * @return
     */
    public List<List<Integer>> permute(int[] nums) {
        // backtracking(nums);
        backtracking1(nums, new boolean[nums.length]);
        return result;
    }

    List<List<Integer>> result = new ArrayList<>();
    List<Integer> cur = new ArrayList<>();

    public void backtracking(int[] nums) {
        if (cur.size() == nums.length) {
            result.add(new ArrayList<>(cur));
            return;
        }
        for (int i = 0; i < nums.length; i++) {
            if (cur.contains(nums[i])) {
                continue;
            }
            cur.add(nums[i]);
            backtracking(nums);
            cur.remove(cur.size() - 1);
        }
    }

    // 代码随想录: use a used[] array to record which elements have already been taken
    // (O(1) per lookup instead of cur.contains' O(n)); beats 100% on runtime and 38.03% on memory
    public void backtracking1(int[] nums, boolean[] used) {
        if (cur.size() == nums.length) {
            result.add(new ArrayList<>(cur));
            return;
        }
        for (int i = 0; i < nums.length; i++) {
            if (used[i]) {
                continue;
            }
            used[i] = true;
            cur.add(nums[i]);
            backtracking1(nums, used);
            cur.remove(cur.size() - 1);
            used[i] = false;
        }
    }
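
    // Illustrative check (added, not in the original): n distinct numbers should produce n! permutations.
    @Test
    public void testPermutationCount() {
        int[] nums = {1, 2, 3};
        System.out.println(permute(nums).size()); // expected: 3! = 6
    }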
}
@ -472,19 +472,30 @@ def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False
total_result.append(output4)
total_result = np.reshape(total_result, [len(total_result), -1])
total_result = np.reshape(total_result, [-1, ])

# Computation of the false-alarm rate, miss rate and accuracy
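# Illustrative sketch (added, not part of the original): one possible way to fill in these metrics,
# assuming the first two thirds of the test window are truly healthy and that crossing the 0.5
# threshold marks a fault (the direction of the threshold is an assumption here):
# fault_start = int(total_result.shape[0] * 2 / 3)
# is_fault = total_result < 0.5
# false_alarm_rate = np.mean(is_fault[:fault_start])
# miss_rate = np.mean(~is_fault[fault_start:])
# print("false-alarm rate:", false_alarm_rate, "| miss rate:", miss_rate)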
if isSave:
    np.savetxt(save_mse_name, total_result, delimiter=',')
if isPlot:
    plt.figure(1, figsize=(6.0, 2.68))
    plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
                        hspace=None)
    plt.tight_layout()
    font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # font family and size for the axis labels

    plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
    # Draw the horizontal failure-threshold line
    plt.axhline(0.5, c='red', label='Failure threshold')
    # Arrow pointing at the threshold line
    # plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
    #           alpha=0.9, overhang=0.5)
    # plt.text(35000, 0.9, "Truth Fault", fontsize=10, color='black', verticalalignment='top')
    plt.text(test_data.shape[0] * 2 / 3 + 1000, 0.7, "Truth Fault", fontsize=10, color='red', verticalalignment='top')
    plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
    plt.xticks(range(5), ('06/09/17', '12/09/17', '18/09/17', '24/09/17', '29/09/17'))  # x-axis tick labels (5 ticks to match the 5 labels)
    plt.tick_params()  # tick appearance
    plt.xlabel("time", fontdict=font1)
    plt.ylabel("confidence", fontdict=font1)
    plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
@ -7,4 +7,668 @@
@Date : 2022/10/11 19:00
@Usage :
@Desc : RNet that uses MSE as the loss function
'''
'''

import tensorflow as tf
|
||||
import tensorflow.keras
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import matplotlib.pyplot as plt
|
||||
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
|
||||
from condition_monitoring.data_deal import loadData
|
||||
from model.Joint_Monitoring.compare.RNet_MSE import Joint_Monitoring
|
||||
|
||||
from model.CommonFunction.CommonFunction import *
|
||||
from sklearn.model_selection import train_test_split
|
||||
from tensorflow.keras.models import load_model, save_model
|
||||
import random
|
||||
|
||||
'''Hyperparameter settings'''
time_stamp = 120
feature_num = 10
batch_size = 16
learning_rate = 0.001
EPOCH = 101
model_name = "RNet_MSE"
'''EWMA hyperparameters'''
K = 18
namuda = 0.01
'''Save names'''
|
||||
|
||||
save_name = "./model/weight/{0}_weight/weight".format(model_name,
|
||||
time_stamp,
|
||||
feature_num,
|
||||
batch_size,
|
||||
EPOCH)
|
||||
save_step_two_name = "./model/two_weight/{0}_timestamp{1}_feature{2}_weight/weight".format(model_name,
|
||||
time_stamp,
|
||||
feature_num,
|
||||
batch_size,
|
||||
EPOCH)
|
||||
|
||||
|
||||
save_mse_name = "./mse/RNet_MSE/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
|
||||
time_stamp,
|
||||
feature_num,
|
||||
batch_size,
|
||||
EPOCH)
|
||||
save_max_name = "./mse/RNet_MSE/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
|
||||
time_stamp,
|
||||
feature_num,
|
||||
batch_size,
|
||||
EPOCH)
|
||||
# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
|
||||
# time_stamp,
|
||||
# feature_num,
|
||||
# batch_size,
|
||||
# EPOCH)
|
||||
# save_step_two_name = "../model/joint_two/{0}_timestamp{1}_feature{2}.h5".format(model_name,
|
||||
# time_stamp,
|
||||
# feature_num,
|
||||
# batch_size,
|
||||
# EPOCH)
|
||||
'''File name'''
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"

'''
File notes: jb4q_8_delete_total_zero.csv is the file in which only the all-zero columns have been deleted.
Rows 0:415548 are all normal values (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
Rows 415549:432153 are all abnormal values (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
'''
'''File parameters'''
# Last normal time point
healthy_date = 415548
# Last abnormal time point
unhealthy_date = 432153
# Abnormality tolerance
unhealthy_patience = 5
|
||||
|
||||
|
||||
def remove(data, time_stamp=time_stamp):
|
||||
rows, cols = data.shape
|
||||
print("remove_data.shape:", data.shape)
|
||||
num = int(rows / time_stamp)
|
||||
|
||||
return data[:num * time_stamp, :]
|
||||
pass
|
||||
|
||||
|
||||
# Non-overlapping sampling
def get_training_data(data, time_stamp: int = time_stamp):
|
||||
removed_data = remove(data=data)
|
||||
rows, cols = removed_data.shape
|
||||
print("removed_data.shape:", data.shape)
|
||||
print("removed_data:", removed_data)
|
||||
train_data = np.reshape(removed_data, [-1, time_stamp, cols])
|
||||
print("train_data:", train_data)
|
||||
batchs, time_stamp, cols = train_data.shape
|
||||
|
||||
for i in range(1, batchs):
|
||||
each_label = np.expand_dims(train_data[i, 0, :], axis=0)
|
||||
if i == 1:
|
||||
train_label = each_label
|
||||
else:
|
||||
train_label = np.concatenate([train_label, each_label], axis=0)
|
||||
|
||||
print("train_data.shape:", train_data.shape)
|
||||
print("train_label.shape", train_label.shape)
|
||||
return train_data[:-1, :], train_label
|
||||
|
||||
|
||||
# Overlapping (sliding-window) sampling
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
|
||||
rows, cols = data.shape
|
||||
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
|
||||
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
|
||||
for i in range(rows):
|
||||
if i + time_stamp >= rows:
|
||||
break
|
||||
if i + time_stamp < rows - 1:
|
||||
train_data[i] = data[i:i + time_stamp]
|
||||
train_label[i] = data[i + time_stamp]
|
||||
|
||||
print("重叠采样以后:")
|
||||
print("data:", train_data) # (300334,120,10)
|
||||
print("label:", train_label) # (300334,10)
|
||||
|
||||
if is_Healthy:
|
||||
train_label2 = np.ones(shape=[train_label.shape[0]])
|
||||
else:
|
||||
train_label2 = np.zeros(shape=[train_label.shape[0]])
|
||||
|
||||
print("label2:", train_label2)
|
||||
|
||||
return train_data, train_label, train_label2
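# Illustrative example (added): with time_stamp = 3 and rows = 6 the windows and labels pair up as
#   data[0:3] -> label data[3], data[1:4] -> label data[4], ...
# i.e. each window of time_stamp consecutive rows is labelled with the row that immediately follows it.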
|
||||
|
||||
|
||||
# RepConv re-parameterized convolution
def RepConv(input_tensor, k=3):
|
||||
_, _, output_dim = input_tensor.shape
|
||||
conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
|
||||
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||
|
||||
conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
|
||||
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||
|
||||
b3 = tf.keras.layers.BatchNormalization()(input_tensor)
|
||||
|
||||
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||
out = tf.nn.relu(out)
|
||||
return out
|
||||
|
||||
|
||||
# RepBlock module
def RepBlock(input_tensor, num: int = 3):
|
||||
for i in range(num):
|
||||
input_tensor = RepConv(input_tensor)
|
||||
return input_tensor
|
||||
|
||||
|
||||
# GAP: global average pooling channel attention
def Global_avg_channelAttention(input_tensor):
|
||||
_, length, channel = input_tensor.shape
|
||||
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||
s1 = tf.nn.sigmoid(c1)
|
||||
output = tf.multiply(input_tensor, s1)
|
||||
return output
|
||||
|
||||
|
||||
# GDP: global dynamic pooling channel attention
def Global_Dynamic_channelAttention(input_tensor):
|
||||
_, length, channel = input_tensor.shape
|
||||
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||
|
||||
# GAP
|
||||
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||
s1 = tf.nn.sigmoid(c1)
|
||||
|
||||
# GMP
|
||||
GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
|
||||
c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
|
||||
s3 = tf.nn.sigmoid(c2)

# NOTE: the max-pooling branch (c2 / s3) is computed but never applied; the output below only uses the average-pooling attention s1
output = tf.multiply(input_tensor, s1)
|
||||
return output
|
||||
|
||||
|
||||
# Min-max normalization
def normalization(data):
    rows, cols = data.shape
    print("Before normalization:", data)
    print(data.shape)
    print("======================")

    # Min-max scale each column to [0, 1]
    max = np.max(data, axis=0)
    max = np.broadcast_to(max, [rows, cols])
    min = np.min(data, axis=0)
    min = np.broadcast_to(min, [rows, cols])

    data = (data - min) / (max - min)
    print("After normalization:", data)
    print(data.shape)

    return data
|
||||
|
||||
|
||||
# Standardization (z-score scaling); the name "Regularization" is kept from the original code
def Regularization(data):
    rows, cols = data.shape
    print("Before standardization:", data)
    print(data.shape)
    print("======================")

    # Subtract the per-column mean and divide by the per-column standard deviation
    mean = np.mean(data, axis=0)
    mean = np.broadcast_to(mean, shape=[rows, cols])
    dst = np.sqrt(np.var(data, axis=0))
    dst = np.broadcast_to(dst, shape=[rows, cols])
    data = (data - mean) / dst
    print("After standardization:", data)
    print(data.shape)

    return data
    pass
|
||||
|
||||
|
||||
def EWMA(data, K=K, namuda=namuda):
    # t is the sample index of an EWMA control chart; with the expression below and t = 0,
    # the limits reduce to the steady-state (asymptotic) EWMA control limits
    t = 0
    mid = np.mean(data, axis=0)
    standard = np.sqrt(np.var(data, axis=0))
    UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    return mid, UCL, LCL
|
||||
pass
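# Illustrative note (added): in a standard EWMA control chart the limits vary with the sample index t,
#   UCL_t = mid + K * sigma * sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** (2 * t))),
# and converge to the steady-state value computed above as t grows. A time-varying variant could look like:
# def EWMA_t(data, t, K=K, namuda=namuda):
#     mid = np.mean(data, axis=0)
#     sigma = np.sqrt(np.var(data, axis=0))
#     width = K * sigma * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** (2 * t)))
#     return mid, mid + width, mid - width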
|
||||
|
||||
|
||||
# train_data: (300455, 120, 10)
# train_label1: (300455, 10)
# train_label2: (300455,)
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||
train_label1,
|
||||
train_label2,
|
||||
test_size=split_size,
|
||||
shuffle=True,
|
||||
random_state=100)
|
||||
if is_split:
|
||||
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||
# print(train_data.shape)
|
||||
# print(train_label1.shape)
|
||||
# print(train_label2.shape)
|
||||
# print(train_data.shape)
|
||||
|
||||
return train_data, train_label1, train_label2
|
||||
pass
|
||||
|
||||
|
||||
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||
split_size: float = 0.2, shuffle: bool = True):
|
||||
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||
label1,
|
||||
label2,
|
||||
test_size=split_size,
|
||||
shuffle=shuffle,
|
||||
random_state=100)
|
||||
|
||||
# print(train_data.shape)
|
||||
# print(train_label1.shape)
|
||||
# print(train_label2.shape)
|
||||
# print(train_data.shape)
|
||||
|
||||
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||
|
||||
pass
|
||||
|
||||
|
||||
# train_data: (300455, 120, 10)
# train_label1: (300455, 10)
# train_label2: (300455,)
def train_step_one(train_data, train_label1, train_label2):
|
||||
model = Joint_Monitoring()
|
||||
# TODO The model must be built/called once before model.summary() can be printed
|
||||
# model.build(input_shape=(batch_size, filter_num, dims))
|
||||
# model.summary()
|
||||
history_loss = []
|
||||
history_val_loss = []
|
||||
learning_rate = 1e-3
|
||||
for epoch in range(EPOCH):
|
||||
|
||||
print()
|
||||
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||
if epoch == 0:
|
||||
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||
train_label2,
|
||||
is_split=True)
|
||||
# print()
|
||||
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||
# z tells train() which mini-batch of this epoch is being processed
z = 0
# k counts samples so that a training step runs once every batch_size samples
k = 1
|
||||
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||
size, _, _ = train_data.shape
|
||||
data_1 = tf.expand_dims(data_1, axis=0)
|
||||
label_1 = tf.expand_dims(label_1, axis=0)
|
||||
label_2 = tf.expand_dims(label_2, axis=0)
|
||||
if batch_size != 1:
|
||||
if k % batch_size == 1:
|
||||
data = data_1
|
||||
label1 = label_1
|
||||
label2 = label_2
|
||||
else:
|
||||
data = tf.concat([data, data_1], axis=0)
|
||||
label1 = tf.concat([label1, label_1], axis=0)
|
||||
label2 = tf.concat([label2, label_2], axis=0)
|
||||
else:
|
||||
data = data_1
|
||||
label1 = label_1
|
||||
label2 = label_2
|
||||
|
||||
if k % batch_size == 0:
|
||||
# label = tf.expand_dims(label, axis=-1)
|
||||
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
|
||||
learning_rate=learning_rate,
|
||||
is_first_time=True)
|
||||
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
|
||||
k = 0
|
||||
z = z + 1
|
||||
k = k + 1
|
||||
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
|
||||
is_first_time=True)
|
||||
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||
history_val_loss.append(val_loss)
|
||||
history_loss.append(loss_value.numpy())
|
||||
print('Training loss is :', loss_value.numpy())
|
||||
print('Validating loss is :', val_loss.numpy())
|
||||
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||
break
|
||||
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||
if learning_rate >= 1e-4:
|
||||
learning_rate = learning_rate * 0.1
|
||||
pass
|
||||
|
||||
|
||||
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
|
||||
# step_two_model = Joint_Monitoring()
|
||||
# step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
|
||||
# step_two_model.summary()
|
||||
history_loss = []
|
||||
history_val_loss = []
|
||||
history_accuracy = []
|
||||
learning_rate = 1e-3
|
||||
for epoch in range(EPOCH):
|
||||
print()
|
||||
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||
if epoch == 0:
|
||||
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||
train_label2,
|
||||
is_split=True)
|
||||
# print()
|
||||
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||
# z tells train() which mini-batch of this epoch is being processed
z = 0
# k counts samples so that a training step runs once every batch_size samples
k = 1
|
||||
accuracy_num = 0
|
||||
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||
size, _, _ = train_data.shape
|
||||
data_1 = tf.expand_dims(data_1, axis=0)
|
||||
label_1 = tf.expand_dims(label_1, axis=0)
|
||||
label_2 = tf.expand_dims(label_2, axis=0)
|
||||
if batch_size != 1:
|
||||
if k % batch_size == 1:
|
||||
data = data_1
|
||||
label1 = label_1
|
||||
label2 = label_2
|
||||
else:
|
||||
data = tf.concat([data, data_1], axis=0)
|
||||
label1 = tf.concat([label1, label_1], axis=0)
|
||||
label2 = tf.concat([label2, label_2], axis=0)
|
||||
else:
|
||||
data = data_1
|
||||
label1 = label_1
|
||||
label2 = label_2
|
||||
|
||||
if k % batch_size == 0:
|
||||
# label = tf.expand_dims(label, axis=-1)
|
||||
output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
|
||||
loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
|
||||
learning_rate=learning_rate,
|
||||
is_first_time=False, pred_3=output1, pred_4=output2,
|
||||
pred_5=output3)
|
||||
accuracy_num += accuracy_value
|
||||
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
|
||||
accuracy_num / ((z + 1) * batch_size))
|
||||
k = 0
|
||||
z = z + 1
|
||||
k = k + 1
|
||||
|
||||
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
|
||||
val_label2=val_label2,
|
||||
is_first_time=False, step_one_model=step_one_model)
|
||||
SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
|
||||
accuracy_value=val_accuracy)
|
||||
history_val_loss.append(val_loss)
|
||||
history_loss.append(loss_value.numpy())
|
||||
history_accuracy.append(val_accuracy)
|
||||
print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
|
||||
accuracy_num / ((z + 1) * batch_size)))
|
||||
print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
|
||||
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||
break
|
||||
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||
if learning_rate >= 1e-4:
|
||||
learning_rate = learning_rate * 0.1
|
||||
pass
|
||||
|
||||
|
||||
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
|
||||
history_loss = []
|
||||
history_val_loss = []
|
||||
|
||||
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
|
||||
val_label2=test_label2,
|
||||
is_first_time=False, step_one_model=step_one_model)
|
||||
|
||||
history_val_loss.append(val_loss)
|
||||
print("val_accuracy:", val_accuracy)
|
||||
print("val_loss:", val_loss)
|
||||
|
||||
|
||||
def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False):
|
||||
# Get the total number of parameters of the model
|
||||
# step_two_model.count_params()
|
||||
total_result = []
|
||||
size, length, dims = test_data.shape
|
||||
for epoch in range(0, size - batch_size + 1, batch_size):
|
||||
each_test_data = test_data[epoch:epoch + batch_size, :, :]
|
||||
_, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
|
||||
total_result.append(output4)
|
||||
total_result = np.reshape(total_result, [total_result.__len__(), -1])
|
||||
total_result = np.reshape(total_result, [-1, ])
|
||||
if isPlot:
|
||||
plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
|
||||
# Draw the horizontal failure-threshold line
plt.axhline(0.5, c='red', label='Failure threshold')
# Arrow pointing at the threshold line
# plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
#           alpha=0.9, overhang=0.5)
# plt.text(35000, 0.9, "Truth Fault", fontsize=10, color='black', verticalalignment='top')
plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
plt.xlabel("time")
plt.ylabel("confidence")
|
||||
plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
|
||||
horizontalalignment='center',
|
||||
bbox={'facecolor': 'grey',
|
||||
'pad': 10})
|
||||
plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
|
||||
horizontalalignment='center',
|
||||
bbox={'facecolor': 'grey',
|
||||
'pad': 10})
|
||||
plt.grid()
|
||||
# plt.ylim(0, 1)
|
||||
# plt.xlim(-50, 1300)
|
||||
# plt.legend("", loc='upper left')
|
||||
plt.show()
|
||||
return total_result
|
||||
|
||||
|
||||
def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
|
||||
predicted_data1 = []
|
||||
predicted_data2 = []
|
||||
predicted_data3 = []
|
||||
size, length, dims = data.shape
|
||||
for epoch in range(0, size, batch_size):
|
||||
each_test_data = data[epoch:epoch + batch_size, :, :]
|
||||
output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
|
||||
if epoch == 0:
|
||||
predicted_data1 = output1
|
||||
predicted_data2 = output2
|
||||
predicted_data3 = output3
|
||||
else:
|
||||
predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
|
||||
predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
|
||||
predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
|
||||
|
||||
predicted_data1 = np.reshape(predicted_data1, [-1, 10])
|
||||
predicted_data2 = np.reshape(predicted_data2, [-1, 10])
|
||||
predicted_data3 = np.reshape(predicted_data3, [-1, 10])
|
||||
predict_data = 0
|
||||
|
||||
predict_data = predicted_data1
|
||||
mseList = []
|
||||
meanList = []
|
||||
maxList = []
|
||||
|
||||
for i in range(1, 4):
|
||||
print("i:", i)
|
||||
if i == 1:
|
||||
predict_data = predicted_data1
|
||||
elif i == 2:
|
||||
predict_data = predicted_data2
|
||||
elif i == 3:
|
||||
predict_data = predicted_data3
|
||||
temp = np.abs(predict_data - label)
|
||||
temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
|
||||
temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
|
||||
temp3 = temp1 / temp2
|
||||
mse = np.sum((temp1 / temp2) ** 2, axis=1)
|
||||
|
||||
print("mse.shape:", mse.shape)
|
||||
# mse=np.mean((predicted_data-label)**2,axis=1)
|
||||
# print("mse", mse)
|
||||
mseList.append(mse)
|
||||
if isStandard:
|
||||
dims, = mse.shape
|
||||
mean = np.mean(mse)
|
||||
std = np.sqrt(np.var(mse))
|
||||
max = mean + 3 * std
|
||||
print("max.shape:", max.shape)
|
||||
# min = mean-3*std
|
||||
max = np.broadcast_to(max, shape=[dims, ])
|
||||
# min = np.broadcast_to(min,shape=[dims,])
|
||||
mean = np.broadcast_to(mean, shape=[dims, ])
|
||||
if isPlot:
|
||||
plt.figure(random.randint(1, 100))
|
||||
plt.plot(max)
|
||||
plt.plot(mse)
|
||||
plt.plot(mean)
|
||||
# plt.plot(min)
|
||||
plt.show()
|
||||
maxList.append(max)
|
||||
meanList.append(mean)
|
||||
else:
|
||||
if isPlot:
|
||||
plt.figure(random.randint(1, 100))
|
||||
plt.plot(mse)
|
||||
# plt.plot(min)
|
||||
plt.show()
|
||||
|
||||
return mseList, meanList, maxList
|
||||
# pass
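# Illustrative note (added): the health index returned above is, for each sample i,
#   mse[i] = sum_j ((|pred[i, j] - label[i, j]| - mean_j) / std_j) ** 2
# where mean_j and std_j are the mean and standard deviation of feature j's absolute residual over
# the evaluated segment, i.e. a squared standardized-residual score summed over the 10 features.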
|
||||
|
||||
|
||||
# healthy_data is the healthy data used to determine the threshold; all_data is the complete data set used to produce the model's results
|
||||
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
|
||||
isSave: bool = False, predictI: int = 1):
|
||||
# TODO Compute the MSE to determine the threshold
|
||||
# plt.ion()
|
||||
mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
|
||||
mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
|
||||
|
||||
for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
|
||||
|
||||
# False-alarm rate computation
|
||||
total, = mse.shape
|
||||
faultNum = 0
|
||||
faultList = []
|
||||
for i in range(total):
|
||||
if (mse[i] > max[i]):
|
||||
faultNum += 1
|
||||
faultList.append(mse[i])
|
||||
|
||||
fault_rate = faultNum / total
|
||||
print("误报率:", fault_rate)
|
||||
|
||||
# Miss-rate computation
|
||||
missNum = 0
|
||||
missList = []
|
||||
all, = mse1.shape
|
||||
for i in range(all):
|
||||
if (mse1[i] < max[0]):
|
||||
missNum += 1
|
||||
missList.append(mse1[i])
|
||||
|
||||
miss_rate = missNum / all
|
||||
print("漏报率:", miss_rate)
|
||||
|
||||
# Overall plot
|
||||
print("mse:", mse)
|
||||
print("mse1:", mse1)
|
||||
print("============================================")
|
||||
total_mse = np.concatenate([mse, mse1], axis=0)
|
||||
total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
|
||||
# min = np.broadcast_to(min,shape=[dims,])
|
||||
total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
|
||||
|
||||
if isSave:
|
||||
save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||
save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||
|
||||
np.savetxt(save_mse_name1,total_mse, delimiter=',')
|
||||
np.savetxt(save_max_name1,total_max, delimiter=',')
|
||||
|
||||
|
||||
plt.figure(random.randint(1, 100))
|
||||
plt.plot(total_max)
|
||||
plt.plot(total_mse)
|
||||
plt.plot(total_mean)
|
||||
# plt.plot(min)
|
||||
plt.show()
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
total_data = loadData.execute(N=feature_num, file_name=file_name)
|
||||
total_data = normalization(data=total_data)
|
||||
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
|
||||
total_data[:healthy_date, :], is_Healthy=True)
|
||||
train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
|
||||
total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
|
||||
is_Healthy=False)
|
||||
#### TODO Step-one training
# Quick single-batch test
|
||||
# train_step_one(train_data=train_data_healthy[:256, :, :], train_label1=train_label1_healthy[:256, :],
|
||||
# train_label2=train_label2_healthy[:256, ])
|
||||
#### Model training
|
||||
train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
|
||||
|
||||
# Load the models already trained in step one: one keeps training, the other only produces outputs
|
||||
step_one_model = Joint_Monitoring()
|
||||
step_one_model.load_weights(save_name)
|
||||
#
|
||||
# step_two_model = Joint_Monitoring()
|
||||
# step_two_model.load_weights(save_name)
|
||||
|
||||
# #### TODO Compute the MSE
|
||||
healthy_size, _, _ = train_data_healthy.shape
|
||||
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||
all_data, _, _ = get_training_data_overlapping(
|
||||
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||
|
||||
## Quick single test of result generation
|
||||
# getResult(step_one_model,
|
||||
# healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
|
||||
# :],
|
||||
# healthy_label=train_label1_healthy[
|
||||
# healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
|
||||
# unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
|
||||
|
||||
getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||
healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||
unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy, isSave=True)
|
||||
|
||||
### TODO Show the full set of results
|
||||
# all_data, _, _ = get_training_data_overlapping(
|
||||
# total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||
# all_data = np.concatenate([])
|
||||
# Quick single test
|
||||
# showResult(step_two_model, test_data=all_data[:32], isPlot=True)
|
||||
# showResult(step_two_model, test_data=all_data, isPlot=True)
|
||||
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -72,40 +72,40 @@ class Joint_Monitoring(keras.Model):
|
|||
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||
conv1 = self.conv1(RepDCBlock1)
|
||||
conv1 = tf.nn.leaky_relu(conv1)
|
||||
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||
# conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||
upsample1 = self.upsample1(conv1)
|
||||
|
||||
# DACU2 = self.DACU2(upsample1)
|
||||
DACU2 = tf.keras.layers.BatchNormalization()(upsample1)
|
||||
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||
# DACU2 = tf.keras.layers.BatchNormalization()(upsample1)
|
||||
RepDCBlock2 = self.RepDCBlock2(upsample1)
|
||||
# RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||
conv2 = self.conv2(RepDCBlock2)
|
||||
conv2 = tf.nn.leaky_relu(conv2)
|
||||
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||
# conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||
upsample2 = self.upsample2(conv2)
|
||||
|
||||
# DACU3 = self.DACU3(upsample2)
|
||||
DACU3 = tf.keras.layers.BatchNormalization()(upsample2)
|
||||
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||
# DACU3 = tf.keras.layers.BatchNormalization()(upsample2)
|
||||
RepDCBlock3 = self.RepDCBlock3(upsample2)
|
||||
# RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||
conv3 = self.conv3(RepDCBlock3)
|
||||
conv3 = tf.nn.leaky_relu(conv3)
|
||||
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||
# conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||
|
||||
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||
|
||||
# DACU4 = self.DACU4(concat1)
|
||||
DACU4 = tf.keras.layers.BatchNormalization()(concat1)
|
||||
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||
# DACU4 = tf.keras.layers.BatchNormalization()(concat1)
|
||||
RepDCBlock4 = self.RepDCBlock4(concat1)
|
||||
# RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||
conv4 = self.conv4(RepDCBlock4)
|
||||
conv4 = tf.nn.leaky_relu(conv4)
|
||||
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||
# conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||
|
||||
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||
|
||||
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||
# RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||
|
||||
output1 = []
|
||||
output2 = []
|
||||
|
|
@ -117,25 +117,25 @@ class Joint_Monitoring(keras.Model):
|
|||
# Reconstruct the original data
# Attached to block3
|
||||
GRU1 = self.GRU1(RepDCBlock3)
|
||||
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
# GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
d1 = self.d1(GRU1)
|
||||
# tf.nn.softmax
|
||||
output1 = self.output1(d1)
|
||||
# Attached to block4
|
||||
GRU2 = self.GRU2(RepDCBlock4)
|
||||
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||
# GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||
d2 = self.d2(GRU2)
|
||||
# tf.nn.softmax
|
||||
output2 = self.output2(d2)
|
||||
# Attached to block5
|
||||
GRU3 = self.GRU3(RepDCBlock5)
|
||||
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
# GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
d3 = self.d3(GRU3)
|
||||
# tf.nn.softmax
|
||||
output3 = self.output3(d3)
|
||||
else:
|
||||
GRU1 = self.GRU1(RepDCBlock3)
|
||||
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
# GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
d1 = self.d1(GRU1)
|
||||
# tf.nn.softmax
|
||||
output1 = self.output1(d1)
|
||||
|
|
@ -147,7 +147,7 @@ class Joint_Monitoring(keras.Model):
|
|||
output2 = self.output2(d2)
|
||||
# Attached to block5
|
||||
GRU3 = self.GRU3(RepDCBlock5)
|
||||
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
# GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
d3 = self.d3(GRU3)
|
||||
# tf.nn.softmax
|
||||
output3 = self.output3(d3)
|
||||
|
|
@ -175,73 +175,73 @@ class Joint_Monitoring(keras.Model):
|
|||
pred_5=None):
|
||||
# step one
|
||||
RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
|
||||
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||
# RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||
conv1 = self.conv1(RepDCBlock1)
|
||||
conv1 = tf.nn.leaky_relu(conv1)
|
||||
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||
# conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||
upsample1 = self.upsample1(conv1)
|
||||
|
||||
# DACU2 = self.DACU2(upsample1)
|
||||
DACU2 = tf.keras.layers.BatchNormalization()(upsample1)
|
||||
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||
# DACU2 = tf.keras.layers.BatchNormalization()(upsample1)
|
||||
RepDCBlock2 = self.RepDCBlock2(upsample1)
|
||||
# RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||
conv2 = self.conv2(RepDCBlock2)
|
||||
conv2 = tf.nn.leaky_relu(conv2)
|
||||
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||
# conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||
upsample2 = self.upsample2(conv2)
|
||||
|
||||
# DACU3 = self.DACU3(upsample2)
|
||||
DACU3 = tf.keras.layers.BatchNormalization()(upsample2)
|
||||
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||
# DACU3 = tf.keras.layers.BatchNormalization()(upsample2)
|
||||
RepDCBlock3 = self.RepDCBlock3(upsample2)
|
||||
# RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||
conv3 = self.conv3(RepDCBlock3)
|
||||
conv3 = tf.nn.leaky_relu(conv3)
|
||||
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||
# conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||
|
||||
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||
|
||||
# DACU4 = self.DACU4(concat1)
|
||||
DACU4 = tf.keras.layers.BatchNormalization()(concat1)
|
||||
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||
# DACU4 = tf.keras.layers.BatchNormalization()(concat1)
|
||||
RepDCBlock4 = self.RepDCBlock4(concat1)
|
||||
# RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||
conv4 = self.conv4(RepDCBlock4)
|
||||
conv4 = tf.nn.leaky_relu(conv4)
|
||||
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||
# conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||
|
||||
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||
|
||||
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||
# RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||
|
||||
if is_first_time:
|
||||
# step two
|
||||
# Reconstruct the original data
# Attached to block3
|
||||
GRU1 = self.GRU1(RepDCBlock3)
|
||||
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
# GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
d1 = self.d1(GRU1)
|
||||
# tf.nn.softmax
|
||||
output1 = self.output1(d1)
|
||||
# Attached to block4
|
||||
GRU2 = self.GRU2(RepDCBlock4)
|
||||
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||
# GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||
d2 = self.d2(GRU2)
|
||||
# tf.nn.softmax
|
||||
output2 = self.output2(d2)
|
||||
# Attached to block5
|
||||
GRU3 = self.GRU3(RepDCBlock5)
|
||||
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
# GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
d3 = self.d3(GRU3)
|
||||
# tf.nn.softmax
|
||||
output3 = self.output3(d3)
|
||||
|
||||
# reduce_mean collapses the remaining dimensions to compute the mean
|
||||
MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
|
||||
MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
|
||||
MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
|
||||
# MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
|
||||
# MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
|
||||
# MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
|
||||
# MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
|
||||
# MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
|
||||
# MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
|
||||
MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
|
||||
MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
|
||||
MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
|
||||
|
||||
print("MSE_loss1:", MSE_loss1.numpy())
|
||||
print("MSE_loss2:", MSE_loss2.numpy())
|
||||
|
|
@ -254,19 +254,19 @@ class Joint_Monitoring(keras.Model):
|
|||
# Reconstruct the original data
# Attached to block3
|
||||
GRU1 = self.GRU1(RepDCBlock3)
|
||||
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
# GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||
d1 = self.d1(GRU1)
|
||||
# tf.nn.softmax
|
||||
output1 = self.output1(d1)
|
||||
# Attached to block4
|
||||
GRU2 = self.GRU2(RepDCBlock4)
|
||||
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||
# GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||
d2 = self.d2(GRU2)
|
||||
# tf.nn.softmax
|
||||
output2 = self.output2(d2)
|
||||
# Attached to block5
|
||||
GRU3 = self.GRU3(RepDCBlock5)
|
||||
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
# GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||
d3 = self.d3(GRU3)
|
||||
# tf.nn.softmax
|
||||
output3 = self.output3(d3)
|
||||
|
|