leecode update
This commit is contained in:
parent b03cc42571
commit 1b92547add
@ -0,0 +1,120 @@
package com.markilue.leecode.backtrace;

import org.junit.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.backtrace
 * @Author: markilue
 * @CreateTime: 2022-10-20 09:46
 * @Description: TODO LeetCode 47, Permutations II:
 * Given a sequence nums that may contain duplicate numbers, return all distinct permutations in any order.
 * @Version: 1.0
 */
public class PermuteUnique {

    @Test
    public void test() {
        int[] nums = {1, 1, 2};
        System.out.println(permuteUnique(nums));
    }

    @Test
    public void test1() {
        int[] nums = {1, 2, 3};
        System.out.println(permuteUnique(nums));
    }

    List<List<Integer>> result = new ArrayList<>();
    List<Integer> cur = new ArrayList<>();
    int[] index = new int[10];

    /**
     * Approach: the result must contain no duplicates and order does not matter, so there are two options:
     * 1) sort first and use a used[] array to mark which elements are taken; the same value must not be
     *    reused within one tree level, while different levels may reuse it;
     * 2) use a map to count how many times each number occurs.
     * Option 1) is attempted here, but with a per-level set instead.
     * Runtime beats 44.42%, memory beats 39.13%.
     *
     * @param nums
     * @return
     */
    public List<List<Integer>> permuteUnique(int[] nums) {

        boolean[] used = new boolean[nums.length];
        Arrays.sort(nums);
        backtracking(nums, used);

        // backtracking(nums);
        return result;
    }

    public void backtracking(int[] nums) {

        // if (start > nums.length) {
        //     return;
        // }

        if (cur.size() == nums.length) {
            result.add(new ArrayList<>(cur));
            return;
        }

        HashSet<Integer> set = new HashSet<>();
        for (int i = 0; i < nums.length; i++) {

            if (set.contains(nums[i]) || index[i] == 1) {
                continue;
            }
            index[i] = 1;
            set.add(nums[i]);
            cur.add(nums[i]);
            backtracking(nums);
            cur.remove(cur.size() - 1);
            index[i] = 0;
        }
    }

    /**
     * Runtime beats 99.85%, memory beats 89.51%.
     * @param nums
     * @param used
     */
    public void backtracking(int[] nums, boolean[] used) {

        if (cur.size() == nums.length) {
            result.add(new ArrayList<>(cur));
            return;
        }

        for (int i = 0; i < nums.length; i++) {

            // same value on the same tree level: skip it to avoid duplicate permutations
            if (i > 0 && nums[i] == nums[i - 1] && !used[i - 1]) {
                continue;
            }

            if (!used[i]) {
                // index[i] = 1;
                used[i] = true;
                cur.add(nums[i]);
                backtracking(nums, used);
                cur.remove(cur.size() - 1);
                // index[i] = 0;
                used[i] = false;
            }
        }
    }
}
@ -0,0 +1,237 @@
package com.markilue.leecode.backtrace;

import org.junit.Test;

import java.util.*;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.backtrace
 * @Author: markilue
 * @CreateTime: 2022-10-20 11:14
 * @Description: TODO LeetCode 51, N-Queens:
 * By the rules of chess, a queen attacks any piece that shares its row, column, or diagonal.
 * The n-queens puzzle asks how to place n queens on an n x n board so that no two queens attack each other.
 * Given an integer n, return all distinct solutions to the n-queens puzzle.
 * Each solution is a distinct board layout in which 'Q' and '.' mark a queen and an empty square respectively.
 * @Version: 1.0
 */
public class SolveNQueens {

    @Test
    public void test() {
        // StringBuilder builder = new StringBuilder("1");
        // System.out.println(builder.delete(0, builder.length() - 1));

        System.out.println(solveNQueens(5));
    }

    @Test
    public void test1() {
        // StringBuilder builder = new StringBuilder("1");
        // System.out.println(builder.delete(0, builder.length() - 1));

        System.out.println(solveNQueens(8).size());
    }

    /**
     * The key is the constraint check: same row, same column, and same diagonal are all forbidden.
     * Runtime beats 39.74%, memory beats 92.94%.
     * @param n
     * @return
     */
    public List<List<String>> solveNQueens(int n) {
        int[] used = new int[n];
        // for (int i = 0; i < n; i++) {
        //     used[i] = Integer.MIN_VALUE;
        // }
        backtracking1(n, used, 0);

        return result;
    }

    List<List<String>> result = new ArrayList<>();
    List<String> cur = new ArrayList<>();

    /**
     * The used array records occupied positions: used[i] = k means row i, column k is occupied.
     * value is the index of the row currently being placed.
     * Source of inefficiency: the inner for loop has to re-check every previously placed row;
     * could a set be used to record the forbidden positions instead?
     * @param n
     * @param used
     */
    public void backtracking(int n, int[] used, int value) {

        if (cur.size() == n) {
            result.add(new ArrayList<>(cur));
            return;
        }
        StringBuilder stringBuilder = new StringBuilder();

        // walk across the columns of the current row
        for (int i = 0; i < n; i++) {

            boolean flag = true;
            // violates the rules
            for (int j = 0; j < value; j++) {
                if (!check(j, used[j], value, i)) {
                    flag = false;
                    break;
                }
            }

            if (!flag) {
                continue;
            }
            used[value] = i;

            for (int j = 0; j < i; j++) {
                stringBuilder.append(".");
            }
            stringBuilder.append("Q");
            for (int j = i + 1; j < n; j++) {
                stringBuilder.append(".");
            }
            cur.add(stringBuilder.toString());
            // move down to the next row
            backtracking(n, used, value + 1);
            cur.remove(cur.size() - 1);
            stringBuilder.delete(0, stringBuilder.length());
        }
    }

    /**
     * The used array records occupied positions: used[i] = k means row i, column k is occupied.
     * value is the index of the row currently being placed.
     * Optimizes the StringBuilder handling by using the replace method;
     * efficiency actually seems to have dropped.
     * @param n
     * @param used
     */
    public void backtracking1(int n, int[] used, int value) {

        if (cur.size() == n) {
            result.add(new ArrayList<>(cur));
            return;
        }

        StringBuilder build = new StringBuilder();
        for (int i = 0; i < n; i++) {
            build.append(".");
        }

        StringBuilder stringBuilder = new StringBuilder(build);

        // walk across the columns of the current row
        for (int i = 0; i < n; i++) {

            boolean flag = true;
            // violates the rules
            for (int j = 0; j < value; j++) {
                if (!check(j, used[j], value, i)) {
                    flag = false;
                    break;
                }
            }

            if (!flag) {
                continue;
            }
            used[value] = i;
            stringBuilder.replace(i, i + 1, "Q");
            cur.add(stringBuilder.toString());
            // move down to the next row
            backtracking(n, used, value + 1);
            cur.remove(cur.size() - 1);
            stringBuilder.replace(i, i + 1, ".");
            // stringBuilder.delete(i, stringBuilder.length());
        }
    }

    public boolean check(int i, int value1, int j, int value2) {

        // same column
        if (value1 == value2) {
            return false;
        }
        // same diagonal
        if (Math.abs(i - j) == Math.abs(value1 - value2)) {
            return false;
        }

        return true;
    }

    /**
     * Official solution: use sets to record the columns and diagonals that are no longer available.
     * Efficiency does not seem high either: beats 39.74%.
     * @param n
     * @return
     */
    public List<List<String>> solveNQueens1(int n) {
        List<List<String>> solutions = new ArrayList<List<String>>();
        int[] queens = new int[n];
        Arrays.fill(queens, -1);
        Set<Integer> columns = new HashSet<Integer>();
        Set<Integer> diagonals1 = new HashSet<Integer>();
        Set<Integer> diagonals2 = new HashSet<Integer>();
        backtrack(solutions, queens, n, 0, columns, diagonals1, diagonals2);
        return solutions;
    }

    public void backtrack(List<List<String>> solutions, int[] queens, int n, int row, Set<Integer> columns, Set<Integer> diagonals1, Set<Integer> diagonals2) {
        if (row == n) {
            List<String> board = generateBoard(queens, n);
            solutions.add(board);
        } else {
            for (int i = 0; i < n; i++) {
                // same column
                if (columns.contains(i)) {
                    continue;
                }
                // diagonals
                int diagonal1 = row - i;
                if (diagonals1.contains(diagonal1)) {
                    continue;
                }
                int diagonal2 = row + i;
                if (diagonals2.contains(diagonal2)) {
                    continue;
                }
                queens[row] = i;
                columns.add(i);
                diagonals1.add(diagonal1);
                diagonals2.add(diagonal2);
                backtrack(solutions, queens, n, row + 1, columns, diagonals1, diagonals2);
                queens[row] = -1;
                columns.remove(i);
                diagonals1.remove(diagonal1);
                diagonals2.remove(diagonal2);
            }
        }
    }

    public List<String> generateBoard(int[] queens, int n) {
        List<String> board = new ArrayList<String>();
        for (int i = 0; i < n; i++) {
            char[] row = new char[n];
            Arrays.fill(row, '.');
            row[queens[i]] = 'Q';
            board.add(new String(row));
        }
        return board;
    }
}
@ -0,0 +1,711 @@
# -*- coding: utf-8 -*-

'''
@Author : dingjiawen
@Date : 2022/10/11 18:55
@Usage :
@Desc : Direct classification with RNet
'''


import tensorflow as tf
import tensorflow.keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
from condition_monitoring.data_deal import loadData
from model.Joint_Monitoring.compare.RNet_3 import Joint_Monitoring

from model.CommonFunction.CommonFunction import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model, save_model
import random

'''Hyperparameter settings'''
time_stamp = 120
feature_num = 10
batch_size = 16
learning_rate = 0.001
EPOCH = 101
model_name = "RNet_C"
'''EWMA hyperparameters'''
K = 18
namuda = 0.01
'''Save paths'''

save_name = "./model/weight/{0}/weight".format(model_name, time_stamp, feature_num, batch_size, EPOCH)
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name, time_stamp,
                                                                                     feature_num, batch_size, EPOCH)

save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name, time_stamp, feature_num,
                                                                             batch_size, EPOCH)
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name, time_stamp, feature_num,
                                                                          batch_size, EPOCH)

'''Data file'''
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"

'''
File notes: jb4q_8_delete_total_zero.csv is the file in which only the all-zero columns have been removed.
Rows 0:415548 are all normal values (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
Rows 415549:432153 are all abnormal values (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
'''
'''File parameters'''
# last normal time index
healthy_date = 415548
# last abnormal time index
unhealthy_date = 432153
# tolerance for abnormal samples
unhealthy_patience = 5


# plotting settings
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # font family and size for axis labels


def remove(data, time_stamp=time_stamp):
    rows, cols = data.shape
    print("remove_data.shape:", data.shape)
    num = int(rows / time_stamp)

    return data[:num * time_stamp, :]
    pass


# non-overlapping sampling
def get_training_data(data, time_stamp: int = time_stamp):
    removed_data = remove(data=data)
    rows, cols = removed_data.shape
    print("removed_data.shape:", data.shape)
    print("removed_data:", removed_data)
    train_data = np.reshape(removed_data, [-1, time_stamp, cols])
    print("train_data:", train_data)
    batchs, time_stamp, cols = train_data.shape

    for i in range(1, batchs):
        each_label = np.expand_dims(train_data[i, 0, :], axis=0)
        if i == 1:
            train_label = each_label
        else:
            train_label = np.concatenate([train_label, each_label], axis=0)

    print("train_data.shape:", train_data.shape)
    print("train_label.shape", train_label.shape)
    return train_data[:-1, :], train_label


# overlapping (sliding-window) sampling
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
    rows, cols = data.shape
    train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
    train_label = np.empty(shape=[rows - time_stamp - 1, cols])
    for i in range(rows):
        if i + time_stamp >= rows:
            break
        if i + time_stamp < rows - 1:
            train_data[i] = data[i:i + time_stamp]
            train_label[i] = data[i + time_stamp]

    print("After overlapping sampling:")
    print("data:", train_data)  # (300334,120,10)
    print("label:", train_label)  # (300334,10)

    if is_Healthy:
        train_label2 = np.ones(shape=[train_label.shape[0]])
    else:
        train_label2 = np.zeros(shape=[train_label.shape[0]])

    print("label2:", train_label2)

    return train_data, train_label, train_label2
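
# A minimal illustration of the overlapping sampling above on a toy array (editorial sketch,
# not called anywhere else in this script): each window of time_stamp consecutive rows becomes
# one sample, and the row immediately after the window becomes its regression label.
def _overlapping_sampling_example():
    toy = np.arange(12).reshape(6, 2)  # 6 time steps, 2 features
    windows, labels, flags = get_training_data_overlapping(toy, time_stamp=3, is_Healthy=True)
    # windows.shape == (2, 3, 2): rows 0-2 and rows 1-3
    # labels.shape  == (2, 2):    rows 3 and 4 (the row right after each window)
    # flags is all ones because is_Healthy=True
    return windows, labels, flags
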
# RepConv: re-parameterized convolution
def RepConv(input_tensor, k=3):
    _, _, output_dim = input_tensor.shape
    conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
    b1 = tf.keras.layers.BatchNormalization()(conv1)

    conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
    b2 = tf.keras.layers.BatchNormalization()(conv2)

    b3 = tf.keras.layers.BatchNormalization()(input_tensor)

    out = tf.keras.layers.Add()([b1, b2, b3])
    out = tf.nn.relu(out)
    return out


# RepBlock module
def RepBlock(input_tensor, num: int = 3):
    for i in range(num):
        input_tensor = RepConv(input_tensor)
    return input_tensor


# GAP: global average pooling channel attention
def Global_avg_channelAttention(input_tensor):
    _, length, channel = input_tensor.shape
    DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
    GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
    c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
    s1 = tf.nn.sigmoid(c1)
    output = tf.multiply(input_tensor, s1)
    return output


# GDP: global dynamic pooling channel attention
def Global_Dynamic_channelAttention(input_tensor):
    _, length, channel = input_tensor.shape
    DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)

    # GAP branch
    GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
    c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
    s1 = tf.nn.sigmoid(c1)

    # GMP branch
    GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
    c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
    s3 = tf.nn.sigmoid(c2)

    output = tf.multiply(input_tensor, s1)
    return output


# min-max normalization
def normalization(data):
    rows, cols = data.shape
    print("Before normalization:", data)
    print(data.shape)
    print("======================")

    # normalize
    max = np.max(data, axis=0)
    max = np.broadcast_to(max, [rows, cols])
    min = np.min(data, axis=0)
    min = np.broadcast_to(min, [rows, cols])

    data = (data - min) / (max - min)
    print("After normalization:", data)
    print(data.shape)

    return data


# standardization (z-score)
def Regularization(data):
    rows, cols = data.shape
    print("Before standardization:", data)
    print(data.shape)
    print("======================")

    # standardize
    mean = np.mean(data, axis=0)
    mean = np.broadcast_to(mean, shape=[rows, cols])
    dst = np.sqrt(np.var(data, axis=0))
    dst = np.broadcast_to(dst, shape=[rows, cols])
    data = (data - mean) / dst
    print("After standardization:", data)
    print(data.shape)

    return data
    pass


def EWMA(data, K=K, namuda=namuda):
    # what t should be is still unclear
    t = 0
    mid = np.mean(data, axis=0)
    standard = np.sqrt(np.var(data, axis=0))
    UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    return mid, UCL, LCL
    pass
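
# A small sketch of how the EWMA control limits above could be used (editorial example under the
# same assumptions as EWMA(); it is not part of the original pipeline): samples of a 1-D residual
# series that fall outside [LCL, UCL] are flagged as alarms.
def _ewma_alarm_example(residuals):
    mid, UCL, LCL = EWMA(residuals)
    alarms = np.where((residuals > UCL) | (residuals < LCL))[0]
    return alarms  # indices of the samples outside the control limits
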


def condition_monitoring_model():
    input = tf.keras.Input(shape=[time_stamp, feature_num])
    conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
    GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
    d1 = tf.keras.layers.Dense(300)(GRU1)
    output = tf.keras.layers.Dense(10)(d1)

    model = tf.keras.Model(inputs=input, outputs=output)

    return model


# train_data:   (300455,120,10)
# train_label1: (300455,10)
# train_label2: (300455,)
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
    (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(
        train_data, train_label1, train_label2, test_size=split_size, shuffle=True, random_state=100)
    if is_split:
        return train_data, train_label1, train_label2, test_data, test_label1, test_label2
    train_data = np.concatenate([train_data, test_data], axis=0)
    train_label1 = np.concatenate([train_label1, test_label1], axis=0)
    train_label2 = np.concatenate([train_label2, test_label2], axis=0)
    # print(train_data.shape)
    # print(train_label1.shape)
    # print(train_label2.shape)

    return train_data, train_label1, train_label2
    pass


def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
                    split_size: float = 0.2, shuffle: bool = True):
    data = np.concatenate([healthy_data, unhealthy_data], axis=0)
    label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
    label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
    (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(
        data, label1, label2, test_size=split_size, shuffle=shuffle, random_state=100)

    # print(train_data.shape)
    # print(train_label1.shape)
    # print(train_label2.shape)

    return train_data, train_label1, train_label2, test_data, test_label1, test_label2
    pass


# train_data:   (300455,120,10)
# train_label1: (300455,10)
# train_label2: (300455,)
def train_step_one(train_data, train_label1, train_label2):
    model = Joint_Monitoring()
    # TODO the model has to be built and run once before model.summary() can be printed
    # model.build(input_shape=(batch_size, filter_num, dims))
    # model.summary()
    history_loss = []
    history_val_loss = []
    learning_rate = 1e-3
    for epoch in range(EPOCH):

        print()
        print("EPOCH:", epoch, "/", EPOCH, ":")
        train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
        if epoch == 0:
            train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(
                train_data, train_label1, train_label2, is_split=True)
        # z tells train() which batch of this epoch is being processed
        z = 0
        # retrain once every batch_size samples
        k = 1
        for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
            size, _, _ = train_data.shape
            data_1 = tf.expand_dims(data_1, axis=0)
            label_1 = tf.expand_dims(label_1, axis=0)
            label_2 = tf.expand_dims(label_2, axis=0)
            if batch_size != 1:
                if k % batch_size == 1:
                    data = data_1
                    label1 = label_1
                    label2 = label_2
                else:
                    data = tf.concat([data, data_1], axis=0)
                    label1 = tf.concat([label1, label_1], axis=0)
                    label2 = tf.concat([label2, label_2], axis=0)
            else:
                data = data_1
                label1 = label_1
                label2 = label_2

            if k % batch_size == 0:
                # label = tf.expand_dims(label, axis=-1)
                loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
                                                         learning_rate=learning_rate,
                                                         is_first_time=True)
                print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
                k = 0
                z = z + 1
            k = k + 1
        val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
                                                    is_first_time=True)
        SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
        # SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
        history_val_loss.append(val_loss)
        history_loss.append(loss_value.numpy())
        print('Training loss is :', loss_value.numpy())
        print('Validating loss is :', val_loss.numpy())
        if IsStopTraining(history_loss=history_val_loss, patience=7):
            break
        if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
            if learning_rate >= 1e-4:
                learning_rate = learning_rate * 0.1
    pass


def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
    # step_two_model = Joint_Monitoring()
    # step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
    # step_two_model.summary()
    history_loss = []
    history_val_loss = []
    history_accuracy = []
    learning_rate = 1e-3
    for epoch in range(EPOCH):
        print()
        print("EPOCH:", epoch, "/", EPOCH, ":")
        train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
        if epoch == 0:
            train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(
                train_data, train_label1, train_label2, is_split=True)
        # z tells train() which batch of this epoch is being processed
        z = 0
        # retrain once every batch_size samples
        k = 1
        accuracy_num = 0
        for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
            size, _, _ = train_data.shape
            data_1 = tf.expand_dims(data_1, axis=0)
            label_1 = tf.expand_dims(label_1, axis=0)
            label_2 = tf.expand_dims(label_2, axis=0)
            if batch_size != 1:
                if k % batch_size == 1:
                    data = data_1
                    label1 = label_1
                    label2 = label_2
                else:
                    data = tf.concat([data, data_1], axis=0)
                    label1 = tf.concat([label1, label_1], axis=0)
                    label2 = tf.concat([label2, label_2], axis=0)
            else:
                data = data_1
                label1 = label_1
                label2 = label_2

            if k % batch_size == 0:
                # label = tf.expand_dims(label, axis=-1)
                # output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
                loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
                                                                  learning_rate=learning_rate,
                                                                  is_first_time=False)
                accuracy_num += accuracy_value
                print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
                      accuracy_num / ((z + 1) * batch_size))
                k = 0
                z = z + 1
            k = k + 1

        val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
                                                             val_label2=val_label2,
                                                             is_first_time=False, step_one_model=step_one_model)
        SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
                                accuracy_value=val_accuracy)
        history_val_loss.append(val_loss)
        history_loss.append(loss_value.numpy())
        history_accuracy.append(val_accuracy)
        print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
                                                                           accuracy_num / ((z + 1) * batch_size)))
        print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
        if IsStopTraining(history_loss=history_val_loss, patience=7):
            break
        if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
            if learning_rate >= 1e-4:
                learning_rate = learning_rate * 0.1
    pass


def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
    history_loss = []
    history_val_loss = []

    val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
                                                         val_label2=test_label2,
                                                         is_first_time=False, step_one_model=step_one_model)

    history_val_loss.append(val_loss)
    print("val_accuracy:", val_accuracy)
    print("val_loss:", val_loss)


def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False, isSave: bool = True):
    # get the total number of model parameters
    # step_two_model.count_params()
    total_result = []
    size, length, dims = test_data.shape
    for epoch in range(0, size - batch_size + 1, batch_size):
        each_test_data = test_data[epoch:epoch + batch_size, :, :]
        _, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
        total_result.append(output4)
    total_result = np.reshape(total_result, [total_result.__len__(), -1])
    total_result = np.reshape(total_result, [-1, ])

    # computation of the false-alarm rate, missed-alarm rate and accuracy

    if isSave:
        np.savetxt(save_mse_name, total_result, delimiter=',')
    if isPlot:
        plt.figure(1, figsize=(6.0, 2.68))
        plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
                            hspace=None)
        plt.tight_layout()
        font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # font family and size for axis labels

        plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
        # draw the horizontal failure-threshold line
        plt.axhline(0.5, c='red', label='Failure threshold')
        # arrow pointing at the horizontal line above
        # plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
        #           alpha=0.9, overhang=0.5)
        plt.text(test_data.shape[0] * 2 / 3 + 1000, 0.7, "Truth Fault", fontsize=10, color='red', verticalalignment='top')
        plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
        plt.xticks(range(6), ('06/09/17', '12/09/17', '18/09/17', '24/09/17', '29/09/17'))  # x-axis tick labels
        plt.tick_params()  # axis tick display
        plt.xlabel("time", fontdict=font1)
        plt.ylabel("confidence", fontdict=font1)
        plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
                 horizontalalignment='center',
                 bbox={'facecolor': 'grey',
                       'pad': 10}, fontdict=font1)
        plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
                 horizontalalignment='center',
                 bbox={'facecolor': 'grey',
                       'pad': 10}, fontdict=font1)

        plt.grid()
        # plt.ylim(0, 1)
        # plt.xlim(-50, 1300)
        # plt.legend("", loc='upper left')
        plt.show()
    return total_result


def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
    predicted_data1 = []
    predicted_data2 = []
    predicted_data3 = []
    size, length, dims = data.shape
    for epoch in range(0, size, batch_size):
        each_test_data = data[epoch:epoch + batch_size, :, :]
        output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
        if epoch == 0:
            predicted_data1 = output1
            predicted_data2 = output2
            predicted_data3 = output3
        else:
            predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
            predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
            predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)

    predicted_data1 = np.reshape(predicted_data1, [-1, 10])
    predicted_data2 = np.reshape(predicted_data2, [-1, 10])
    predicted_data3 = np.reshape(predicted_data3, [-1, 10])
    predict_data = 0

    predict_data = predicted_data1
    mseList = []
    meanList = []
    maxList = []

    for i in range(1, 4):
        print("i:", i)
        if i == 1:
            predict_data = predicted_data1
        elif i == 2:
            predict_data = predicted_data2
        elif i == 3:
            predict_data = predicted_data3
        temp = np.abs(predict_data - label)
        temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
        temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
        temp3 = temp1 / temp2
        mse = np.sum((temp1 / temp2) ** 2, axis=1)

        print("mse.shape:", mse.shape)
        # mse = np.mean((predicted_data - label) ** 2, axis=1)
        # print("mse", mse)
        mseList.append(mse)
        if isStandard:
            dims, = mse.shape
            mean = np.mean(mse)
            std = np.sqrt(np.var(mse))
            max = mean + 3 * std
            print("max.shape:", max.shape)
            # min = mean - 3 * std
            max = np.broadcast_to(max, shape=[dims, ])
            # min = np.broadcast_to(min, shape=[dims, ])
            mean = np.broadcast_to(mean, shape=[dims, ])
            if isPlot:
                plt.figure(random.randint(1, 100))
                plt.plot(max)
                plt.plot(mse)
                plt.plot(mean)
                # plt.plot(min)
                plt.show()
            maxList.append(max)
            meanList.append(mean)
        else:
            if isPlot:
                plt.figure(random.randint(1, 100))
                plt.plot(mse)
                # plt.plot(min)
                plt.show()

    return mseList, meanList, maxList
    # pass


# healthy_data is the healthy data used to determine the threshold; all_data is the full data set run through the model
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
              isSave: bool = False, predictI: int = 1):
    # TODO compute the MSE to determine the threshold
    # plt.ion()
    mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
    mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)

    for mse, mean, max, mse1, j in zip(mseList, meanList, maxList, mse1List, range(3)):

        # false-alarm rate
        total, = mse.shape
        faultNum = 0
        faultList = []
        for i in range(total):
            if (mse[i] > max[i]):
                faultNum += 1
                faultList.append(mse[i])

        fault_rate = faultNum / total
        print("False-alarm rate:", fault_rate)

        # missed-alarm rate
        missNum = 0
        missList = []
        all, = mse1.shape
        for i in range(all):
            if (mse1[i] < max[0]):
                missNum += 1
                missList.append(mse1[i])

        miss_rate = missNum / all
        print("Missed-alarm rate:", miss_rate)

        # overall plot
        print("mse:", mse)
        print("mse1:", mse1)
        print("============================================")
        total_mse = np.concatenate([mse, mse1], axis=0)
        total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
        # min = np.broadcast_to(min, shape=[dims, ])
        total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])

        if isSave:
            save_mse_name1 = save_mse_name[:-4] + "_predict" + str(j + 1) + ".csv"
            save_max_name1 = save_max_name[:-4] + "_predict" + str(j + 1) + ".csv"

            np.savetxt(save_mse_name1, total_mse, delimiter=',')
            np.savetxt(save_max_name1, total_max, delimiter=',')

        plt.figure(random.randint(1, 100))
        plt.plot(total_max)
        plt.plot(total_mse)
        plt.plot(total_mean)
        # plt.plot(min)
        plt.show()
    pass


if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)
    train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
        total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
        is_Healthy=False)
    #### TODO step-one training
    # single-run smoke test
    # train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :], train_label2=train_label2_healthy[:32, ])
    # train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)

    # load the models already trained in step one: one keeps training, the other only produces outputs
    # step_one_model = Joint_Monitoring()
    # # step_one_model.load_weights(save_name)
    # #
    # step_two_model = Joint_Monitoring()
    # step_two_model.load_weights(save_name)

    #### TODO step-two training
    ### healthy_data.shape: (300333,120,10)
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape

    ## single-run smoke test for result generation
    # getResult(step_one_model,
    #           healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
    #           :],
    #           healthy_label=train_label1_healthy[
    #           healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
    #           unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :], isSave=True)

    ### TODO evaluate on the test set
    # step_one_model = Joint_Monitoring()
    # step_one_model.load_weights(save_name)
    step_two_model = Joint_Monitoring()
    step_two_model.load_weights(save_step_two_name)
    # test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
    #      test_label2=np.expand_dims(test_label2, axis=-1))

    ### TODO show results on the full data
    all_data, _, _ = get_training_data_overlapping(
        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
    # single-run smoke test
    # showResult(step_two_model, test_data=all_data[:32], isPlot=True)
    showResult(step_two_model, test_data=all_data, isPlot=True)

    pass
@ -0,0 +1,714 @@
# -*- coding: utf-8 -*-

'''
@Author : dingjiawen
@Date : 2022/10/11 18:55
@Usage :
@Desc : Direct classification with RNet
'''


import tensorflow as tf
import tensorflow.keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
from condition_monitoring.data_deal import loadData
from model.Joint_Monitoring.compare.RNet_34 import Joint_Monitoring

from model.CommonFunction.CommonFunction import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model, save_model
import random
|
||||||
|
'''超参数设置'''
|
||||||
|
time_stamp = 120
|
||||||
|
feature_num = 10
|
||||||
|
batch_size = 16
|
||||||
|
learning_rate = 0.001
|
||||||
|
EPOCH = 101
|
||||||
|
model_name = "RNet_C"
|
||||||
|
'''EWMA超参数'''
|
||||||
|
K = 18
|
||||||
|
namuda = 0.01
|
||||||
|
'''保存名称'''
|
||||||
|
|
||||||
|
save_name = "./model/weight/{0}/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
'''文件名'''
|
||||||
|
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
|
||||||
|
|
||||||
|
'''
|
||||||
|
文件说明:jb4q_8_delete_total_zero.csv是删除了只删除了全是0的列的文件
|
||||||
|
文件从0:415548行均是正常值(2019/7.30 00:00:00 - 2019/9/18 11:14:00)
|
||||||
|
从415549:432153行均是异常值(2019/9/18 11:21:01 - 2021/1/18 00:00:00)
|
||||||
|
'''
|
||||||
|
'''文件参数'''
|
||||||
|
# 最后正常的时间点
|
||||||
|
healthy_date = 415548
|
||||||
|
# 最后异常的时间点
|
||||||
|
unhealthy_date = 432153
|
||||||
|
# 异常容忍程度
|
||||||
|
unhealthy_patience = 5
|
||||||
|
|
||||||
|
|
||||||
|
# 画图相关设置
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10} # 设置坐标标签的字体大小,字体
|
||||||
|
|
||||||
|
|
||||||
|
def remove(data, time_stamp=time_stamp):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("remove_data.shape:", data.shape)
|
||||||
|
num = int(rows / time_stamp)
|
||||||
|
|
||||||
|
return data[:num * time_stamp, :]
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# 不重叠采样
|
||||||
|
def get_training_data(data, time_stamp: int = time_stamp):
|
||||||
|
removed_data = remove(data=data)
|
||||||
|
rows, cols = removed_data.shape
|
||||||
|
print("removed_data.shape:", data.shape)
|
||||||
|
print("removed_data:", removed_data)
|
||||||
|
train_data = np.reshape(removed_data, [-1, time_stamp, cols])
|
||||||
|
print("train_data:", train_data)
|
||||||
|
batchs, time_stamp, cols = train_data.shape
|
||||||
|
|
||||||
|
for i in range(1, batchs):
|
||||||
|
each_label = np.expand_dims(train_data[i, 0, :], axis=0)
|
||||||
|
if i == 1:
|
||||||
|
train_label = each_label
|
||||||
|
else:
|
||||||
|
train_label = np.concatenate([train_label, each_label], axis=0)
|
||||||
|
|
||||||
|
print("train_data.shape:", train_data.shape)
|
||||||
|
print("train_label.shape", train_label.shape)
|
||||||
|
return train_data[:-1, :], train_label
|
||||||
|
|
||||||
|
|
||||||
|
# 重叠采样
|
||||||
|
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
|
||||||
|
rows, cols = data.shape
|
||||||
|
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
|
||||||
|
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
|
||||||
|
for i in range(rows):
|
||||||
|
if i + time_stamp >= rows:
|
||||||
|
break
|
||||||
|
if i + time_stamp < rows - 1:
|
||||||
|
train_data[i] = data[i:i + time_stamp]
|
||||||
|
train_label[i] = data[i + time_stamp]
|
||||||
|
|
||||||
|
print("重叠采样以后:")
|
||||||
|
print("data:", train_data) # (300334,120,10)
|
||||||
|
print("label:", train_label) # (300334,10)
|
||||||
|
|
||||||
|
if is_Healthy:
|
||||||
|
train_label2 = np.ones(shape=[train_label.shape[0]])
|
||||||
|
else:
|
||||||
|
train_label2 = np.zeros(shape=[train_label.shape[0]])
|
||||||
|
|
||||||
|
print("label2:", train_label2)
|
||||||
|
|
||||||
|
return train_data, train_label, train_label2
|
||||||
|
|
||||||
|
|
||||||
|
# RepConv重参数化卷积
|
||||||
|
def RepConv(input_tensor, k=3):
|
||||||
|
_, _, output_dim = input_tensor.shape
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
|
||||||
|
conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(input_tensor)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
# RepBlock模块
|
||||||
|
def RepBlock(input_tensor, num: int = 3):
|
||||||
|
for i in range(num):
|
||||||
|
input_tensor = RepConv(input_tensor)
|
||||||
|
return input_tensor
|
||||||
|
|
||||||
|
|
||||||
|
# GAP 全局平均池化
|
||||||
|
def Global_avg_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# GDP 全局动态池化
|
||||||
|
def Global_Dynamic_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
|
||||||
|
# GAP
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
|
||||||
|
# GMP
|
||||||
|
GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
|
||||||
|
c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
|
||||||
|
s3 = tf.nn.sigmoid(c2)
|
||||||
|
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
def normalization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("归一化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
max = np.max(data, axis=0)
|
||||||
|
max = np.broadcast_to(max, [rows, cols])
|
||||||
|
min = np.min(data, axis=0)
|
||||||
|
min = np.broadcast_to(min, [rows, cols])
|
||||||
|
|
||||||
|
data = (data - min) / (max - min)
|
||||||
|
print("归一化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
# 正则化
|
||||||
|
def Regularization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("正则化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 正则化
|
||||||
|
mean = np.mean(data, axis=0)
|
||||||
|
mean = np.broadcast_to(mean, shape=[rows, cols])
|
||||||
|
dst = np.sqrt(np.var(data, axis=0))
|
||||||
|
dst = np.broadcast_to(dst, shape=[rows, cols])
|
||||||
|
data = (data - mean) / dst
|
||||||
|
print("正则化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def EWMA(data, K=K, namuda=namuda):
|
||||||
|
# t是啥暂时未知
|
||||||
|
t = 0
|
||||||
|
mid = np.mean(data, axis=0)
|
||||||
|
standard = np.sqrt(np.var(data, axis=0))
|
||||||
|
UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
return mid, UCL, LCL
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def condition_monitoring_model():
|
||||||
|
input = tf.keras.Input(shape=[time_stamp, feature_num])
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
|
||||||
|
GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
|
||||||
|
d1 = tf.keras.layers.Dense(300)(GRU1)
|
||||||
|
output = tf.keras.layers.Dense(10)(d1)
|
||||||
|
|
||||||
|
model = tf.keras.Model(inputs=input, outputs=output)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
# trian_data:(300455,120,10)
|
||||||
|
# trian_label1:(300455,10)
|
||||||
|
# trian_label2:(300455,)
|
||||||
|
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||||
|
train_label1,
|
||||||
|
train_label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=True,
|
||||||
|
random_state=100)
|
||||||
|
if is_split:
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||||
|
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||||
|
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||||
|
split_size: float = 0.2, shuffle: bool = True):
|
||||||
|
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||||
|
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||||
|
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||||
|
label1,
|
||||||
|
label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=shuffle,
|
||||||
|
random_state=100)
|
||||||
|
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# trian_data:(300455,120,10)
|
||||||
|
# trian_label1:(300455,10)
|
||||||
|
# trian_label2:(300455,)
|
||||||
|
def train_step_one(train_data, train_label1, train_label2):
|
||||||
|
model = Joint_Monitoring()
|
||||||
|
# # # # TODO 需要运行编译一次,才能打印model.summary()
|
||||||
|
# model.build(input_shape=(batch_size, filter_num, dims))
|
||||||
|
# model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=True)
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
|
||||||
|
is_first_time=True)
|
||||||
|
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
print('Training loss is :', loss_value.numpy())
|
||||||
|
print('Validating loss is :', val_loss.numpy())
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
    # step_two_model = Joint_Monitoring()
    # step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
    # step_two_model.summary()
    history_loss = []
    history_val_loss = []
    history_accuracy = []
    learning_rate = 1e-3
    for epoch in range(EPOCH):
        print()
        print("EPOCH:", epoch, "/", EPOCH, ":")
        train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
        if epoch == 0:
            train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
                                                                                               train_label2,
                                                                                               is_split=True)
        # print()
        # print("EPOCH:", epoch, "/", EPOCH, ":")
        # z tracks which mini-batch of the current epoch is being trained
        z = 0
        # k counts samples until a full mini-batch of batch_size has been assembled
        k = 1
        accuracy_num = 0
        for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
            size, _, _ = train_data.shape
            data_1 = tf.expand_dims(data_1, axis=0)
            label_1 = tf.expand_dims(label_1, axis=0)
            label_2 = tf.expand_dims(label_2, axis=0)
            if batch_size != 1:
                if k % batch_size == 1:
                    data = data_1
                    label1 = label_1
                    label2 = label_2
                else:
                    data = tf.concat([data, data_1], axis=0)
                    label1 = tf.concat([label1, label_1], axis=0)
                    label2 = tf.concat([label2, label_2], axis=0)
            else:
                data = data_1
                label1 = label_1
                label2 = label_2

            if k % batch_size == 0:
                # label = tf.expand_dims(label, axis=-1)
                # output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
                loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
                                                                  learning_rate=learning_rate,
                                                                  is_first_time=False)
                accuracy_num += accuracy_value
                print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
                      accuracy_num / ((z + 1) * batch_size))
                k = 0
                z = z + 1
            k = k + 1

        val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
                                                             val_label2=val_label2,
                                                             is_first_time=False, step_one_model=step_one_model)
        SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
                                accuracy_value=val_accuracy)
        history_val_loss.append(val_loss)
        history_loss.append(loss_value.numpy())
        history_accuracy.append(val_accuracy)
        print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
                                                                           accuracy_num / ((z + 1) * batch_size)))
        print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
        if IsStopTraining(history_loss=history_val_loss, patience=7):
            break
        if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
            if learning_rate >= 1e-4:
                learning_rate = learning_rate * 0.1
        pass

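
# --- Illustrative sketch, not part of the original file ---
# The manual k/z counters in train_step_two rebuild each mini-batch sample by sample with
# tf.concat. A minimal alternative using tf.data is sketched below; it assumes that
# step_two_model.train() keeps exactly the signature used above and that the training arrays
# fit in memory. It is an assumption-laden sketch, not the author's training loop.
def train_step_two_with_dataset(step_two_model, train_data, train_label1, train_label2,
                                learning_rate=1e-3):
    dataset = (tf.data.Dataset.from_tensor_slices((train_data, train_label1, train_label2))
               .shuffle(buffer_size=1024)
               .batch(batch_size, drop_remainder=True))
    for step, (data, label1, label2) in enumerate(dataset):
        loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
                                                          learning_rate=learning_rate,
                                                          is_first_time=False)
        if step % 100 == 0:
            print(step * batch_size, ":===============>", "loss:", loss_value.numpy())
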
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
    history_loss = []
    history_val_loss = []

    val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
                                                         val_label2=test_label2,
                                                         is_first_time=False, step_one_model=step_one_model)

    history_val_loss.append(val_loss)
    print("val_accuracy:", val_accuracy)
    print("val_loss:", val_loss)

def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False, isSave: bool = True):
    # number of parameters in the model
    # step_two_model.count_params()
    total_result = []
    size, length, dims = test_data.shape
    for epoch in range(0, size - batch_size + 1, batch_size):
        each_test_data = test_data[epoch:epoch + batch_size, :, :]
        _, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
        total_result.append(output4)
    total_result = np.reshape(total_result, [total_result.__len__(), -1])
    total_result = np.reshape(total_result, [-1, ])

    # TODO false-alarm rate, miss rate and accuracy are computed from total_result (see the sketch after this function)

    if isSave:
        np.savetxt(save_mse_name, total_result, delimiter=',')
    if isPlot:
        plt.figure(1, figsize=(6.0, 2.68))
        plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
                            hspace=None)
        plt.tight_layout()
        font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # axis-label font family and size

        plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
        # horizontal line at the failure threshold (y = 0.5)
        plt.axhline(0.5, c='red', label='Failure threshold')
        # arrow pointing at the threshold line (kept for reference)
        # plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
        #           alpha=0.9, overhang=0.5)
        plt.text(test_data.shape[0] * 2 / 3 + 1000, 0.7, "Truth Fault", fontsize=10, color='red', verticalalignment='top')
        plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
        plt.xticks(range(5), ('06/09/17', '12/09/17', '18/09/17', '24/09/17', '29/09/17'))  # x-axis tick labels (5 labels, 5 positions)
        plt.tick_params()  # tick display settings
        plt.xlabel("time", fontdict=font1)
        plt.ylabel("confidence", fontdict=font1)
        plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
                 horizontalalignment='center',
                 bbox={'facecolor': 'grey',
                       'pad': 10}, fontdict=font1)
        plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
                 horizontalalignment='center',
                 bbox={'facecolor': 'grey',
                       'pad': 10}, fontdict=font1)

        plt.grid()
        # plt.ylim(0, 1)
        # plt.xlim(-50, 1300)
        # plt.legend("", loc='upper left')
        plt.show()
    return total_result

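
# --- Illustrative sketch, not part of the original file ---
# The placeholder comment in showResult mentions false-alarm rate, miss rate and accuracy but
# does not implement them. A minimal sketch is given below. It assumes, as the plot in
# showResult does, that the first two thirds of the windows are truly healthy and the rest
# truly faulty, and that a confidence of at least 0.5 means "healthy"; both are assumptions,
# not facts stated by the original code.
def classification_rates(total_result, threshold=0.5, healthy_fraction=2 / 3):
    n = total_result.shape[0]
    split = int(n * healthy_fraction)
    healthy_pred = total_result[:split] >= threshold     # predictions on truly healthy windows
    faulty_pred = total_result[split:] >= threshold      # predictions on truly faulty windows
    false_alarm_rate = 1.0 - np.mean(healthy_pred)       # healthy windows flagged as faulty
    miss_rate = np.mean(faulty_pred)                     # faulty windows reported as healthy
    accuracy = (np.sum(healthy_pred) + np.sum(~faulty_pred)) / n
    return false_alarm_rate, miss_rate, accuracy
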
def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
    predicted_data1 = []
    predicted_data2 = []
    predicted_data3 = []
    size, length, dims = data.shape
    for epoch in range(0, size, batch_size):
        each_test_data = data[epoch:epoch + batch_size, :, :]
        output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
        if epoch == 0:
            predicted_data1 = output1
            predicted_data2 = output2
            predicted_data3 = output3
        else:
            predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
            predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
            predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)

    predicted_data1 = np.reshape(predicted_data1, [-1, 10])
    predicted_data2 = np.reshape(predicted_data2, [-1, 10])
    predicted_data3 = np.reshape(predicted_data3, [-1, 10])
    predict_data = 0

    predict_data = predicted_data1
    mseList = []
    meanList = []
    maxList = []

    for i in range(1, 4):
        print("i:", i)
        if i == 1:
            predict_data = predicted_data1
        elif i == 2:
            predict_data = predicted_data2
        elif i == 3:
            predict_data = predicted_data3
        # "mse" below is the sum of squared, per-feature standardised absolute residuals of
        # each sample, not a plain mean-squared error
        temp = np.abs(predict_data - label)
        temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
        temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
        temp3 = temp1 / temp2
        mse = np.sum((temp1 / temp2) ** 2, axis=1)

        print("mse.shape:", mse.shape)
        # mse=np.mean((predicted_data-label)**2,axis=1)
        # print("mse", mse)
        mseList.append(mse)
        if isStandard:
            dims, = mse.shape
            mean = np.mean(mse)
            std = np.sqrt(np.var(mse))
            max = mean + 3 * std
            print("max.shape:", max.shape)
            # min = mean-3*std
            max = np.broadcast_to(max, shape=[dims, ])
            # min = np.broadcast_to(min,shape=[dims,])
            mean = np.broadcast_to(mean, shape=[dims, ])
            if isPlot:
                plt.figure(random.randint(1, 100))
                plt.plot(max)
                plt.plot(mse)
                plt.plot(mean)
                # plt.plot(min)
                plt.show()
            maxList.append(max)
            meanList.append(mean)
        else:
            if isPlot:
                plt.figure(random.randint(1, 100))
                plt.plot(mse)
                # plt.plot(min)
                plt.show()

    return mseList, meanList, maxList
    # pass

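
# --- Illustrative sketch, not part of the original file ---
# What get_MSE computes per sample is not a plain MSE: the absolute residual
# |prediction - label| is standardised per feature over the evaluated set and the squared
# values are then summed across features. The small NumPy helper below reproduces that score
# in isolation so the formula is easy to check; it is an illustration only.
def standardized_residual_score(pred, label):
    resid = np.abs(pred - label)                              # (n_samples, n_features)
    z = (resid - resid.mean(axis=0)) / np.sqrt(resid.var(axis=0))
    return np.sum(z ** 2, axis=1)                             # one score per sample

# Example (shapes only):
# _pred, _label = np.random.rand(100, 10), np.random.rand(100, 10)
# standardized_residual_score(_pred, _label).shape            # -> (100,)
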
# healthy_data is the healthy reference set used to fix the threshold; the full data set is
# what the model is evaluated on
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
              isSave: bool = False, predictI: int = 1):
    # TODO compute the residual score on healthy data to fix the threshold
    # plt.ion()
    mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
    mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)

    for mse, mean, max, mse1, j in zip(mseList, meanList, maxList, mse1List, range(3)):

        # false-alarm rate: healthy samples whose score exceeds the threshold
        total, = mse.shape
        faultNum = 0
        faultList = []
        for i in range(total):
            if (mse[i] > max[i]):
                faultNum += 1
                faultList.append(mse[i])

        fault_rate = faultNum / total
        print("False-alarm rate:", fault_rate)

        # miss rate: faulty samples whose score stays below the threshold
        missNum = 0
        missList = []
        all, = mse1.shape
        for i in range(all):
            if (mse1[i] < max[0]):
                missNum += 1
                missList.append(mse1[i])

        miss_rate = missNum / all
        print("Miss rate:", miss_rate)

        # overall plot
        print("mse:", mse)
        print("mse1:", mse1)
        print("============================================")
        total_mse = np.concatenate([mse, mse1], axis=0)
        total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
        # min = np.broadcast_to(min,shape=[dims,])
        total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])

        if isSave:
            save_mse_name1 = save_mse_name[:-4] + "_predict" + str(j + 1) + ".csv"
            save_max_name1 = save_max_name[:-4] + "_predict" + str(j + 1) + ".csv"

            np.savetxt(save_mse_name1, total_mse, delimiter=',')
            np.savetxt(save_max_name1, total_max, delimiter=',')

        plt.figure(random.randint(1, 100))
        plt.plot(total_max)
        plt.plot(total_mse)
        plt.plot(total_mean)
        # plt.plot(min)
        plt.show()
    pass

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)
    train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
        total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
        is_Healthy=False)
    #### TODO step-one training
    # single-batch smoke test
    # train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
    # train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)

    # load the step-one weights twice: one copy keeps training, the other only produces outputs
    # step_one_model = Joint_Monitoring()
    # # step_one_model.load_weights(save_name)
    # #
    # step_two_model = Joint_Monitoring()
    # step_two_model.load_weights(save_name)

    #### TODO step-two training
    ### healthy_data.shape: (300333,120,10)
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    # all_data, _, _ = get_training_data_overlapping(
    #     total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)

    ## single-batch smoke test of the result pipeline
    # getResult(step_one_model,
    #           healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
    #           :],
    #           healthy_label=train_label1_healthy[
    #                         healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
    #           unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)

    ### TODO evaluate on the held-out test split
    # step_one_model = Joint_Monitoring()
    # step_one_model.load_weights(save_name)
    step_two_model = Joint_Monitoring()
    step_two_model.load_weights(save_step_two_name)
    # test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
    #      test_label2=np.expand_dims(test_label2, axis=-1))

    ### TODO show the results over the whole sequence
    all_data, _, _ = get_training_data_overlapping(
        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
    # all_data = np.concatenate([])
    # single-batch smoke test
    # showResult(step_two_model, test_data=all_data[:32], isPlot=True)
    showResult(step_two_model, test_data=all_data, isPlot=True)

    pass

@@ -0,0 +1,714 @@
# -*- coding: utf-8 -*-

# coding: utf-8

'''
@Author : dingjiawen
@Date : 2022/10/11 18:55
@Usage :
@Desc : RNet used directly for classification
'''


import tensorflow as tf
import tensorflow.keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
from condition_monitoring.data_deal import loadData
from model.Joint_Monitoring.compare.RNet_35 import Joint_Monitoring

from model.CommonFunction.CommonFunction import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model, save_model
import random

'''Hyper-parameter settings'''
time_stamp = 120
feature_num = 10
batch_size = 16
learning_rate = 0.001
EPOCH = 101
model_name = "RNet_C"
'''EWMA hyper-parameters'''
K = 18
namuda = 0.01
'''Save paths'''

save_name = "./model/weight/{0}/weight".format(model_name)
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name)

save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
                                                                             time_stamp,
                                                                             feature_num)
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                          time_stamp,
                                                                          feature_num)

'''Data file'''
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"

'''
File notes: jb4q_8_delete_total_zero.csv has only the all-zero columns removed.
Rows 0:415548 are all healthy (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
Rows 415549:432153 are all faulty (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
'''
'''Data-file parameters'''
# last healthy row
healthy_date = 415548
# last faulty row
unhealthy_date = 432153
# tolerance before a window is treated as faulty
unhealthy_patience = 5


# plotting defaults
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # axis-label font family and size

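
# --- Illustrative sketch, not part of the original file ---
# Quick sanity check of the row boundaries documented above: the healthy range must end
# before the faulty range starts, and the window offset used in the __main__ block must stay
# inside the file. Pure constant arithmetic, no data file is needed to run it.
assert 0 < healthy_date < unhealthy_date
assert healthy_date - time_stamp + unhealthy_patience >= 0
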
def remove(data, time_stamp=time_stamp):
    rows, cols = data.shape
    print("remove_data.shape:", data.shape)
    num = int(rows / time_stamp)

    return data[:num * time_stamp, :]
    pass


# non-overlapping sampling
def get_training_data(data, time_stamp: int = time_stamp):
    removed_data = remove(data=data)
    rows, cols = removed_data.shape
    print("removed_data.shape:", data.shape)
    print("removed_data:", removed_data)
    train_data = np.reshape(removed_data, [-1, time_stamp, cols])
    print("train_data:", train_data)
    batchs, time_stamp, cols = train_data.shape

    for i in range(1, batchs):
        each_label = np.expand_dims(train_data[i, 0, :], axis=0)
        if i == 1:
            train_label = each_label
        else:
            train_label = np.concatenate([train_label, each_label], axis=0)

    print("train_data.shape:", train_data.shape)
    print("train_label.shape", train_label.shape)
    return train_data[:-1, :], train_label


# overlapping sampling (a vectorised sketch of the same windows follows this function)
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
    rows, cols = data.shape
    train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
    train_label = np.empty(shape=[rows - time_stamp - 1, cols])
    for i in range(rows):
        if i + time_stamp >= rows:
            break
        if i + time_stamp < rows - 1:
            train_data[i] = data[i:i + time_stamp]
            train_label[i] = data[i + time_stamp]

    print("After overlapping sampling:")
    print("data:", train_data)  # (300334,120,10)
    print("label:", train_label)  # (300334,10)

    if is_Healthy:
        train_label2 = np.ones(shape=[train_label.shape[0]])
    else:
        train_label2 = np.zeros(shape=[train_label.shape[0]])

    print("label2:", train_label2)

    return train_data, train_label, train_label2

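
# --- Illustrative sketch, not part of the original file ---
# The loop in get_training_data_overlapping builds rows - time_stamp - 1 overlapping windows
# one at a time. With NumPy >= 1.20 the same windows and labels can be produced in a single
# call, as sketched below; the shapes and contents match the loop above, but the original
# implementation remains the one actually used.
def overlapping_windows(data, window=time_stamp):
    from numpy.lib.stride_tricks import sliding_window_view
    n = data.shape[0] - window - 1                                   # same count as the loop
    views = sliding_window_view(data, window_shape=window, axis=0)   # (rows-window+1, cols, window)
    windows = np.transpose(views, (0, 2, 1))[:n]                     # windows[i] == data[i:i+window]
    labels = data[window:window + n]                                 # labels[i]  == data[i+window]
    return windows, labels
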
# RepConv: re-parameterised convolution unit (a fusion sketch follows this group of functions)
def RepConv(input_tensor, k=3):
    _, _, output_dim = input_tensor.shape
    conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
    b1 = tf.keras.layers.BatchNormalization()(conv1)

    conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
    b2 = tf.keras.layers.BatchNormalization()(conv2)

    b3 = tf.keras.layers.BatchNormalization()(input_tensor)

    out = tf.keras.layers.Add()([b1, b2, b3])
    out = tf.nn.relu(out)
    return out


# RepBlock: a stack of RepConv units
def RepBlock(input_tensor, num: int = 3):
    for i in range(num):
        input_tensor = RepConv(input_tensor)
    return input_tensor


# GAP: channel attention gated by global average pooling
def Global_avg_channelAttention(input_tensor):
    _, length, channel = input_tensor.shape
    DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
    GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
    c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
    s1 = tf.nn.sigmoid(c1)
    output = tf.multiply(input_tensor, s1)
    return output


# GDP: channel attention with both global average and global max pooling
def Global_Dynamic_channelAttention(input_tensor):
    _, length, channel = input_tensor.shape
    DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)

    # GAP branch
    GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
    c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
    s1 = tf.nn.sigmoid(c1)

    # GMP branch
    GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
    c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
    s3 = tf.nn.sigmoid(c2)

    # NOTE: s3 from the max-pool branch is computed but never used below;
    # only the average-pool gate s1 scales the input
    output = tf.multiply(input_tensor, s1)
    return output

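
# --- Illustrative sketch, not part of the original file ---
# RepConv above is the training-time form of structural re-parameterisation: a k-sized
# conv + BN, a 1x1 conv + BN and a plain BN branch are summed. At inference each conv + BN
# pair can be folded into a single conv with an adjusted kernel and bias. The helper below
# shows that folding for one Conv1D/BatchNormalization pair (Keras kernel layout
# (kernel_size, c_in, c_out), BN statistics per output channel). Fusing the three branches
# into one k-sized conv is the same idea applied per branch, with the 1x1 kernel zero-padded
# and the identity branch written as a centred identity kernel. This sketch is not code used
# by the model above.
def fuse_conv1d_bn(kernel, gamma, beta, moving_mean, moving_var, eps=1e-3):
    scale = gamma / np.sqrt(moving_var + eps)    # (c_out,)
    fused_kernel = kernel * scale                # broadcasts over the output-channel axis
    fused_bias = beta - moving_mean * scale
    return fused_kernel, fused_bias
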
# min-max normalisation
def normalization(data):
    rows, cols = data.shape
    print("Before normalisation:", data)
    print(data.shape)
    print("======================")

    # min-max scaling per column
    max = np.max(data, axis=0)
    max = np.broadcast_to(max, [rows, cols])
    min = np.min(data, axis=0)
    min = np.broadcast_to(min, [rows, cols])

    data = (data - min) / (max - min)
    print("After normalisation:", data)
    print(data.shape)

    return data


# standardisation (zero mean, unit variance per column)
def Regularization(data):
    rows, cols = data.shape
    print("Before standardisation:", data)
    print(data.shape)
    print("======================")

    # standardisation
    mean = np.mean(data, axis=0)
    mean = np.broadcast_to(mean, shape=[rows, cols])
    dst = np.sqrt(np.var(data, axis=0))
    dst = np.broadcast_to(dst, shape=[rows, cols])
    data = (data - mean) / dst
    print("After standardisation:", data)
    print(data.shape)

    return data
    pass


def EWMA(data, K=K, namuda=namuda):
    # the meaning of t is still unclear; with t = 0 the factor (1 - (1 - namuda) ** 2 * t)
    # equals 1, so the asymptotic (large-t) control limits are used
    t = 0
    mid = np.mean(data, axis=0)
    standard = np.sqrt(np.var(data, axis=0))
    UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    return mid, UCL, LCL
    pass

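
# --- Illustrative sketch, not part of the original file ---
# Typical use of the EWMA limits above: fit mid/UCL/LCL on a healthy reference series, then
# flag later points that leave the control band. Only a usage sketch; the choice of reference
# series and of K/namuda is an assumption, not something fixed by the original code.
def ewma_out_of_control(reference, monitored):
    mid, UCL, LCL = EWMA(reference)
    return (monitored > UCL) | (monitored < LCL)   # boolean mask of alarm points

# Example:
# healthy_residuals = np.random.randn(1000)
# alarms = ewma_out_of_control(healthy_residuals, 3 * healthy_residuals)
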
def condition_monitoring_model():
|
||||||
|
input = tf.keras.Input(shape=[time_stamp, feature_num])
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
|
||||||
|
GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
|
||||||
|
d1 = tf.keras.layers.Dense(300)(GRU1)
|
||||||
|
output = tf.keras.layers.Dense(10)(d1)
|
||||||
|
|
||||||
|
model = tf.keras.Model(inputs=input, outputs=output)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
# trian_data:(300455,120,10)
|
||||||
|
# trian_label1:(300455,10)
|
||||||
|
# trian_label2:(300455,)
|
||||||
|
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||||
|
train_label1,
|
||||||
|
train_label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=True,
|
||||||
|
random_state=100)
|
||||||
|
if is_split:
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||||
|
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||||
|
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||||
|
split_size: float = 0.2, shuffle: bool = True):
|
||||||
|
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||||
|
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||||
|
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||||
|
label1,
|
||||||
|
label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=shuffle,
|
||||||
|
random_state=100)
|
||||||
|
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# trian_data:(300455,120,10)
|
||||||
|
# trian_label1:(300455,10)
|
||||||
|
# trian_label2:(300455,)
|
||||||
|
def train_step_one(train_data, train_label1, train_label2):
|
||||||
|
model = Joint_Monitoring()
|
||||||
|
# # # # TODO 需要运行编译一次,才能打印model.summary()
|
||||||
|
# model.build(input_shape=(batch_size, filter_num, dims))
|
||||||
|
# model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=True)
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
|
||||||
|
is_first_time=True)
|
||||||
|
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
print('Training loss is :', loss_value.numpy())
|
||||||
|
print('Validating loss is :', val_loss.numpy())
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
|
||||||
|
# step_two_model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
history_accuracy = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
accuracy_num = 0
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
# output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
|
||||||
|
loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=False)
|
||||||
|
accuracy_num += accuracy_value
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
|
||||||
|
accuracy_num / ((z + 1) * batch_size))
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
|
||||||
|
val_label2=val_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
|
||||||
|
accuracy_value=val_accuracy)
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
history_accuracy.append(val_accuracy)
|
||||||
|
print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
|
||||||
|
accuracy_num / ((z + 1) * batch_size)))
|
||||||
|
print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
|
||||||
|
val_label2=test_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
print("val_accuracy:", val_accuracy)
|
||||||
|
print("val_loss:", val_loss)
|
||||||
|
|
||||||
|
|
||||||
|
def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False,isSave:bool=True):
|
||||||
|
# 获取模型的所有参数的个数
|
||||||
|
# step_two_model.count_params()
|
||||||
|
total_result = []
|
||||||
|
size, length, dims = test_data.shape
|
||||||
|
for epoch in range(0, size - batch_size + 1, batch_size):
|
||||||
|
each_test_data = test_data[epoch:epoch + batch_size, :, :]
|
||||||
|
_, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
|
||||||
|
total_result.append(output4)
|
||||||
|
total_result = np.reshape(total_result, [total_result.__len__(), -1])
|
||||||
|
total_result = np.reshape(total_result, [-1, ])
|
||||||
|
|
||||||
|
#误报率,漏报率,准确性的计算
|
||||||
|
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name, total_result, delimiter=',')
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(1, figsize=(6.0, 2.68))
|
||||||
|
plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
|
||||||
|
hspace=None)
|
||||||
|
plt.tight_layout()
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10} # 设置坐标标签的字体大小,字体
|
||||||
|
|
||||||
|
plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
|
||||||
|
# 画出 y=1 这条水平线
|
||||||
|
plt.axhline(0.5, c='red', label='Failure threshold')
|
||||||
|
# 箭头指向上面的水平线
|
||||||
|
# plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
|
||||||
|
# alpha=0.9, overhang=0.5)
|
||||||
|
plt.text(test_data.shape[0] * 2 / 3+1000, 0.7, "Truth Fault", fontsize=10, color='red', verticalalignment='top')
|
||||||
|
plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
|
||||||
|
plt.xticks(range(6),('06/09/17','12/09/17','18/09/17','24/09/17','29/09/17')) # 设置x轴的标尺
|
||||||
|
plt.tick_params() #设置轴显示
|
||||||
|
plt.xlabel("time",fontdict=font1)
|
||||||
|
plt.ylabel("confience",fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
|
||||||
|
plt.grid()
|
||||||
|
# plt.ylim(0, 1)
|
||||||
|
# plt.xlim(-50, 1300)
|
||||||
|
# plt.legend("", loc='upper left')
|
||||||
|
plt.show()
|
||||||
|
return total_result
|
||||||
|
|
||||||
|
|
||||||
|
def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
|
||||||
|
predicted_data1 = []
|
||||||
|
predicted_data2 = []
|
||||||
|
predicted_data3 = []
|
||||||
|
size, length, dims = data.shape
|
||||||
|
for epoch in range(0, size, batch_size):
|
||||||
|
each_test_data = data[epoch:epoch + batch_size, :, :]
|
||||||
|
output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
|
||||||
|
if epoch == 0:
|
||||||
|
predicted_data1 = output1
|
||||||
|
predicted_data2 = output2
|
||||||
|
predicted_data3 = output3
|
||||||
|
else:
|
||||||
|
predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
|
||||||
|
predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
|
||||||
|
predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
|
||||||
|
|
||||||
|
predicted_data1 = np.reshape(predicted_data1, [-1, 10])
|
||||||
|
predicted_data2 = np.reshape(predicted_data2, [-1, 10])
|
||||||
|
predicted_data3 = np.reshape(predicted_data3, [-1, 10])
|
||||||
|
predict_data = 0
|
||||||
|
|
||||||
|
predict_data = predicted_data1
|
||||||
|
mseList = []
|
||||||
|
meanList = []
|
||||||
|
maxList = []
|
||||||
|
|
||||||
|
for i in range(1, 4):
|
||||||
|
print("i:", i)
|
||||||
|
if i == 1:
|
||||||
|
predict_data = predicted_data1
|
||||||
|
elif i == 2:
|
||||||
|
predict_data = predicted_data2
|
||||||
|
elif i == 3:
|
||||||
|
predict_data = predicted_data3
|
||||||
|
temp = np.abs(predict_data - label)
|
||||||
|
temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
|
||||||
|
temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
|
||||||
|
temp3 = temp1 / temp2
|
||||||
|
mse = np.sum((temp1 / temp2) ** 2, axis=1)
|
||||||
|
|
||||||
|
print("mse.shape:", mse.shape)
|
||||||
|
# mse=np.mean((predicted_data-label)**2,axis=1)
|
||||||
|
# print("mse", mse)
|
||||||
|
mseList.append(mse)
|
||||||
|
if isStandard:
|
||||||
|
dims, = mse.shape
|
||||||
|
mean = np.mean(mse)
|
||||||
|
std = np.sqrt(np.var(mse))
|
||||||
|
max = mean + 3 * std
|
||||||
|
print("max.shape:", max.shape)
|
||||||
|
# min = mean-3*std
|
||||||
|
max = np.broadcast_to(max, shape=[dims, ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
mean = np.broadcast_to(mean, shape=[dims, ])
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(max)
|
||||||
|
plt.plot(mse)
|
||||||
|
plt.plot(mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
maxList.append(max)
|
||||||
|
meanList.append(mean)
|
||||||
|
else:
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(mse)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
return mseList, meanList, maxList
|
||||||
|
# pass
|
||||||
|
|
||||||
|
|
||||||
|
# healthy_data是健康数据,用于确定阈值,all_data是完整的数据,用于模型出结果
|
||||||
|
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
|
||||||
|
isSave: bool = False, predictI: int = 1):
|
||||||
|
# TODO 计算MSE确定阈值
|
||||||
|
# plt.ion()
|
||||||
|
mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
|
||||||
|
mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
|
||||||
|
|
||||||
|
for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
|
||||||
|
|
||||||
|
# 误报率的计算
|
||||||
|
total, = mse.shape
|
||||||
|
faultNum = 0
|
||||||
|
faultList = []
|
||||||
|
for i in range(total):
|
||||||
|
if (mse[i] > max[i]):
|
||||||
|
faultNum += 1
|
||||||
|
faultList.append(mse[i])
|
||||||
|
|
||||||
|
fault_rate = faultNum / total
|
||||||
|
print("误报率:", fault_rate)
|
||||||
|
|
||||||
|
# 漏报率计算
|
||||||
|
missNum = 0
|
||||||
|
missList = []
|
||||||
|
all, = mse1.shape
|
||||||
|
for i in range(all):
|
||||||
|
if (mse1[i] < max[0]):
|
||||||
|
missNum += 1
|
||||||
|
missList.append(mse1[i])
|
||||||
|
|
||||||
|
miss_rate = missNum / all
|
||||||
|
print("漏报率:", miss_rate)
|
||||||
|
|
||||||
|
# 总体图
|
||||||
|
print("mse:", mse)
|
||||||
|
print("mse1:", mse1)
|
||||||
|
print("============================================")
|
||||||
|
total_mse = np.concatenate([mse, mse1], axis=0)
|
||||||
|
total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name1,total_mse, delimiter=',')
|
||||||
|
np.savetxt(save_max_name1,total_max, delimiter=',')
|
||||||
|
|
||||||
|
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(total_max)
|
||||||
|
plt.plot(total_mse)
|
||||||
|
plt.plot(total_mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
total_data = loadData.execute(N=feature_num, file_name=file_name)
|
||||||
|
total_data = normalization(data=total_data)
|
||||||
|
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
|
||||||
|
total_data[:healthy_date, :], is_Healthy=True)
|
||||||
|
train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
|
||||||
|
total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
|
||||||
|
is_Healthy=False)
|
||||||
|
#### TODO 第一步训练
|
||||||
|
# 单次测试
|
||||||
|
# train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
|
||||||
|
# train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
|
||||||
|
|
||||||
|
# 导入第一步已经训练好的模型,一个继续训练,一个只输出结果
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# # step_one_model.load_weights(save_name)
|
||||||
|
# #
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.load_weights(save_name)
|
||||||
|
|
||||||
|
#### TODO 第二步训练
|
||||||
|
### healthy_data.shape: (300333,120,10)
|
||||||
|
### unhealthy_data.shape: (16594,10)
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
|
||||||
|
healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
|
||||||
|
healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||||
|
healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
|
||||||
|
unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
|
||||||
|
# train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
|
||||||
|
# train_data=train_data,
|
||||||
|
# train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
|
||||||
|
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
# all_data, _, _ = get_training_data_overlapping(
|
||||||
|
# total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
|
||||||
|
##出结果单次测试
|
||||||
|
# getResult(step_one_model,
|
||||||
|
# healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
|
||||||
|
# :],
|
||||||
|
# healthy_label=train_label1_healthy[
|
||||||
|
# healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
|
||||||
|
# unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
|
||||||
|
|
||||||
|
### TODO 测试测试集
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# step_one_model.load_weights(save_name)
|
||||||
|
step_two_model = Joint_Monitoring()
|
||||||
|
step_two_model.load_weights(save_step_two_name)
|
||||||
|
# test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
|
||||||
|
# test_label2=np.expand_dims(test_label2, axis=-1))
|
||||||
|
|
||||||
|
###TODO 展示全部的结果
|
||||||
|
all_data, _, _ = get_training_data_overlapping(
|
||||||
|
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
# all_data = np.concatenate([])
|
||||||
|
# 单次测试
|
||||||
|
# showResult(step_two_model, test_data=all_data[:32], isPlot=True)
|
||||||
|
showResult(step_two_model, test_data=all_data, isPlot=True)
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
@@ -0,0 +1,714 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/10/11 18:55
|
||||||
|
@Usage :
|
||||||
|
@Desc : RNet直接进行分类
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.Joint_Monitoring.compare.RNet_4 import Joint_Monitoring
|
||||||
|
|
||||||
|
from model.CommonFunction.CommonFunction import *
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from tensorflow.keras.models import load_model, save_model
|
||||||
|
import random
|
||||||
|
|
||||||
|
'''超参数设置'''
|
||||||
|
time_stamp = 120
|
||||||
|
feature_num = 10
|
||||||
|
batch_size = 16
|
||||||
|
learning_rate = 0.001
|
||||||
|
EPOCH = 101
|
||||||
|
model_name = "RNet_C"
|
||||||
|
'''EWMA超参数'''
|
||||||
|
K = 18
|
||||||
|
namuda = 0.01
|
||||||
|
'''保存名称'''
|
||||||
|
|
||||||
|
save_name = "./model/weight/{0}/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
'''文件名'''
|
||||||
|
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
|
||||||
|
|
||||||
|
'''
|
||||||
|
文件说明:jb4q_8_delete_total_zero.csv是删除了只删除了全是0的列的文件
|
||||||
|
文件从0:415548行均是正常值(2019/7.30 00:00:00 - 2019/9/18 11:14:00)
|
||||||
|
从415549:432153行均是异常值(2019/9/18 11:21:01 - 2021/1/18 00:00:00)
|
||||||
|
'''
|
||||||
|
'''文件参数'''
|
||||||
|
# 最后正常的时间点
|
||||||
|
healthy_date = 415548
|
||||||
|
# 最后异常的时间点
|
||||||
|
unhealthy_date = 432153
|
||||||
|
# 异常容忍程度
|
||||||
|
unhealthy_patience = 5
|
||||||
|
|
||||||
|
|
||||||
|
# 画图相关设置
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10} # 设置坐标标签的字体大小,字体
|
||||||
|
|
||||||
|
|
||||||
|
def remove(data, time_stamp=time_stamp):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("remove_data.shape:", data.shape)
|
||||||
|
num = int(rows / time_stamp)
|
||||||
|
|
||||||
|
return data[:num * time_stamp, :]
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# 不重叠采样
|
||||||
|
def get_training_data(data, time_stamp: int = time_stamp):
|
||||||
|
removed_data = remove(data=data)
|
||||||
|
rows, cols = removed_data.shape
|
||||||
|
print("removed_data.shape:", data.shape)
|
||||||
|
print("removed_data:", removed_data)
|
||||||
|
train_data = np.reshape(removed_data, [-1, time_stamp, cols])
|
||||||
|
print("train_data:", train_data)
|
||||||
|
batchs, time_stamp, cols = train_data.shape
|
||||||
|
|
||||||
|
for i in range(1, batchs):
|
||||||
|
each_label = np.expand_dims(train_data[i, 0, :], axis=0)
|
||||||
|
if i == 1:
|
||||||
|
train_label = each_label
|
||||||
|
else:
|
||||||
|
train_label = np.concatenate([train_label, each_label], axis=0)
|
||||||
|
|
||||||
|
print("train_data.shape:", train_data.shape)
|
||||||
|
print("train_label.shape", train_label.shape)
|
||||||
|
return train_data[:-1, :], train_label
|
||||||
|
|
||||||
|
|
||||||
|
# 重叠采样
|
||||||
|
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
|
||||||
|
rows, cols = data.shape
|
||||||
|
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
|
||||||
|
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
|
||||||
|
for i in range(rows):
|
||||||
|
if i + time_stamp >= rows:
|
||||||
|
break
|
||||||
|
if i + time_stamp < rows - 1:
|
||||||
|
train_data[i] = data[i:i + time_stamp]
|
||||||
|
train_label[i] = data[i + time_stamp]
|
||||||
|
|
||||||
|
print("重叠采样以后:")
|
||||||
|
print("data:", train_data) # (300334,120,10)
|
||||||
|
print("label:", train_label) # (300334,10)
|
||||||
|
|
||||||
|
if is_Healthy:
|
||||||
|
train_label2 = np.ones(shape=[train_label.shape[0]])
|
||||||
|
else:
|
||||||
|
train_label2 = np.zeros(shape=[train_label.shape[0]])
|
||||||
|
|
||||||
|
print("label2:", train_label2)
|
||||||
|
|
||||||
|
return train_data, train_label, train_label2
|
||||||
|
|
||||||
|
|
||||||
|
# RepConv重参数化卷积
|
||||||
|
def RepConv(input_tensor, k=3):
|
||||||
|
_, _, output_dim = input_tensor.shape
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
|
||||||
|
conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(input_tensor)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
# RepBlock模块
|
||||||
|
def RepBlock(input_tensor, num: int = 3):
|
||||||
|
for i in range(num):
|
||||||
|
input_tensor = RepConv(input_tensor)
|
||||||
|
return input_tensor
|
||||||
|
|
||||||
|
|
||||||
|
# GAP 全局平均池化
|
||||||
|
def Global_avg_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# GDP 全局动态池化
|
||||||
|
def Global_Dynamic_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
|
||||||
|
# GAP
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
|
||||||
|
# GMP
|
||||||
|
GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
|
||||||
|
c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
|
||||||
|
s3 = tf.nn.sigmoid(c2)
|
||||||
|
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
def normalization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("归一化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
max = np.max(data, axis=0)
|
||||||
|
max = np.broadcast_to(max, [rows, cols])
|
||||||
|
min = np.min(data, axis=0)
|
||||||
|
min = np.broadcast_to(min, [rows, cols])
|
||||||
|
|
||||||
|
data = (data - min) / (max - min)
|
||||||
|
print("归一化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
# 正则化
|
||||||
|
def Regularization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("正则化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 正则化
|
||||||
|
mean = np.mean(data, axis=0)
|
||||||
|
mean = np.broadcast_to(mean, shape=[rows, cols])
|
||||||
|
dst = np.sqrt(np.var(data, axis=0))
|
||||||
|
dst = np.broadcast_to(dst, shape=[rows, cols])
|
||||||
|
data = (data - mean) / dst
|
||||||
|
print("正则化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def EWMA(data, K=K, namuda=namuda):
|
||||||
|
# t是啥暂时未知
|
||||||
|
t = 0
|
||||||
|
mid = np.mean(data, axis=0)
|
||||||
|
standard = np.sqrt(np.var(data, axis=0))
|
||||||
|
UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
return mid, UCL, LCL
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def condition_monitoring_model():
|
||||||
|
input = tf.keras.Input(shape=[time_stamp, feature_num])
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
|
||||||
|
GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
|
||||||
|
d1 = tf.keras.layers.Dense(300)(GRU1)
|
||||||
|
output = tf.keras.layers.Dense(10)(d1)
|
||||||
|
|
||||||
|
model = tf.keras.Model(inputs=input, outputs=output)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
# trian_data:(300455,120,10)
|
||||||
|
# trian_label1:(300455,10)
|
||||||
|
# trian_label2:(300455,)
|
||||||
|
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||||
|
train_label1,
|
||||||
|
train_label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=True,
|
||||||
|
random_state=100)
|
||||||
|
if is_split:
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||||
|
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||||
|
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||||
|
split_size: float = 0.2, shuffle: bool = True):
|
||||||
|
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||||
|
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||||
|
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||||
|
label1,
|
||||||
|
label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=shuffle,
|
||||||
|
random_state=100)
|
||||||
|
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# trian_data:(300455,120,10)
|
||||||
|
# trian_label1:(300455,10)
|
||||||
|
# trian_label2:(300455,)
|
||||||
|
def train_step_one(train_data, train_label1, train_label2):
|
||||||
|
model = Joint_Monitoring()
|
||||||
|
# # # # TODO 需要运行编译一次,才能打印model.summary()
|
||||||
|
# model.build(input_shape=(batch_size, filter_num, dims))
|
||||||
|
# model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=True)
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
|
||||||
|
is_first_time=True)
|
||||||
|
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
print('Training loss is :', loss_value.numpy())
|
||||||
|
print('Validating loss is :', val_loss.numpy())
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
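
# Sketch only (not called): the manual k/z counters above accumulate batch_size samples
# before each call to model.train. Assuming the train() signature used in this script,
# the same batching can be expressed with tf.data; drop_remainder=True skips the
# incomplete final batch exactly as the manual loop does.
def _demo_tfdata_batching(model, train_data, train_label1, train_label2, lr=1e-3):
    dataset = (tf.data.Dataset.from_tensor_slices((train_data, train_label1, train_label2))
               .shuffle(buffer_size=1024)
               .batch(batch_size, drop_remainder=True))
    for step, (data, label1, label2) in enumerate(dataset):
        loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
                                                 learning_rate=lr, is_first_time=True)
        print(step * batch_size, ":", "loss:", loss_value.numpy())
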
|
||||||
|
|
||||||
|
|
||||||
|
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
|
||||||
|
# step_two_model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
history_accuracy = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
        # z tracks which training step of the current epoch this is
|
||||||
|
z = 0
|
||||||
|
        # k counts samples; training runs once every batch_size samples
|
||||||
|
k = 1
|
||||||
|
accuracy_num = 0
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
# output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
|
||||||
|
loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=False)
|
||||||
|
accuracy_num += accuracy_value
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
|
||||||
|
accuracy_num / ((z + 1) * batch_size))
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
|
||||||
|
val_label2=val_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
|
||||||
|
accuracy_value=val_accuracy)
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
history_accuracy.append(val_accuracy)
|
||||||
|
print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
|
||||||
|
accuracy_num / ((z + 1) * batch_size)))
|
||||||
|
print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
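
# Sketch only (not called): the running accuracy above is tracked by hand via accuracy_num.
# Assuming accuracy_value is the number of correct predictions in each batch (as the
# division by (z + 1) * batch_size suggests), tf.keras.metrics.Mean over the per-batch
# accuracy keeps the same running value with less bookkeeping.
def _demo_running_accuracy(per_batch_correct_counts):
    running = tf.keras.metrics.Mean()
    for correct in per_batch_correct_counts:
        running.update_state(correct / batch_size)   # per-batch accuracy
    return running.result().numpy()
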
|
||||||
|
|
||||||
|
|
||||||
|
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
|
||||||
|
val_label2=test_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
print("val_accuracy:", val_accuracy)
|
||||||
|
print("val_loss:", val_loss)
|
||||||
|
|
||||||
|
|
||||||
|
def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False,isSave:bool=True):
|
||||||
|
    # Number of parameters in the model
|
||||||
|
# step_two_model.count_params()
|
||||||
|
total_result = []
|
||||||
|
size, length, dims = test_data.shape
|
||||||
|
for epoch in range(0, size - batch_size + 1, batch_size):
|
||||||
|
each_test_data = test_data[epoch:epoch + batch_size, :, :]
|
||||||
|
_, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
|
||||||
|
total_result.append(output4)
|
||||||
|
total_result = np.reshape(total_result, [total_result.__len__(), -1])
|
||||||
|
total_result = np.reshape(total_result, [-1, ])
|
||||||
|
|
||||||
|
    # Computation of false-alarm rate, missed-alarm rate and accuracy (left unimplemented here; a sketch follows this function)
|
||||||
|
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name, total_result, delimiter=',')
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(1, figsize=(6.0, 2.68))
|
||||||
|
plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
|
||||||
|
hspace=None)
|
||||||
|
plt.tight_layout()
|
||||||
|
        font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # axis-label font family and size
|
||||||
|
|
||||||
|
plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
|
||||||
|
        # Draw the horizontal failure-threshold line at y = 0.5
|
||||||
|
plt.axhline(0.5, c='red', label='Failure threshold')
|
||||||
|
        # Arrow pointing at the horizontal threshold line (left commented out)
|
||||||
|
# plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
|
||||||
|
# alpha=0.9, overhang=0.5)
|
||||||
|
plt.text(test_data.shape[0] * 2 / 3+1000, 0.7, "Truth Fault", fontsize=10, color='red', verticalalignment='top')
|
||||||
|
plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
|
||||||
|
        plt.xticks(range(5), ('06/09/17', '12/09/17', '18/09/17', '24/09/17', '29/09/17'))  # x-axis tick positions and labels (5 labels, so 5 positions)
|
||||||
|
        plt.tick_params()  # tick display settings
|
||||||
|
plt.xlabel("time",fontdict=font1)
|
||||||
|
        plt.ylabel("confidence", fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
|
||||||
|
plt.grid()
|
||||||
|
# plt.ylim(0, 1)
|
||||||
|
# plt.xlim(-50, 1300)
|
||||||
|
# plt.legend("", loc='upper left')
|
||||||
|
plt.show()
|
||||||
|
return total_result
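
# Sketch only (not called): the "false-alarm / missed-alarm / accuracy" step noted inside
# showResult is left unimplemented there. Assuming output4 is the "healthy" confidence
# (label2 is 1 for healthy windows elsewhere in this script), values below the 0.5
# threshold are flagged as faults, and the true fault is assumed to start at the 2/3 point
# marked by the vertical line; flip the comparisons if the model outputs a fault
# probability instead.
def _demo_confidence_metrics(total_result, threshold=0.5):
    n = total_result.shape[0]
    fault_start = int(n * 2 / 3)
    healthy_part = total_result[:fault_start]
    faulty_part = total_result[fault_start:]
    false_alarm_rate = np.mean(healthy_part < threshold)
    missed_alarm_rate = np.mean(faulty_part >= threshold)
    accuracy = (np.sum(healthy_part >= threshold) + np.sum(faulty_part < threshold)) / n
    return false_alarm_rate, missed_alarm_rate, accuracy
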
|
||||||
|
|
||||||
|
|
||||||
|
def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
|
||||||
|
predicted_data1 = []
|
||||||
|
predicted_data2 = []
|
||||||
|
predicted_data3 = []
|
||||||
|
size, length, dims = data.shape
|
||||||
|
for epoch in range(0, size, batch_size):
|
||||||
|
each_test_data = data[epoch:epoch + batch_size, :, :]
|
||||||
|
output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
|
||||||
|
if epoch == 0:
|
||||||
|
predicted_data1 = output1
|
||||||
|
predicted_data2 = output2
|
||||||
|
predicted_data3 = output3
|
||||||
|
else:
|
||||||
|
predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
|
||||||
|
predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
|
||||||
|
predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
|
||||||
|
|
||||||
|
predicted_data1 = np.reshape(predicted_data1, [-1, 10])
|
||||||
|
predicted_data2 = np.reshape(predicted_data2, [-1, 10])
|
||||||
|
predicted_data3 = np.reshape(predicted_data3, [-1, 10])
|
||||||
|
predict_data = 0
|
||||||
|
|
||||||
|
predict_data = predicted_data1
|
||||||
|
mseList = []
|
||||||
|
meanList = []
|
||||||
|
maxList = []
|
||||||
|
|
||||||
|
for i in range(1, 4):
|
||||||
|
print("i:", i)
|
||||||
|
if i == 1:
|
||||||
|
predict_data = predicted_data1
|
||||||
|
elif i == 2:
|
||||||
|
predict_data = predicted_data2
|
||||||
|
elif i == 3:
|
||||||
|
predict_data = predicted_data3
|
||||||
|
temp = np.abs(predict_data - label)
|
||||||
|
temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
|
||||||
|
temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
|
||||||
|
temp3 = temp1 / temp2
|
||||||
|
mse = np.sum((temp1 / temp2) ** 2, axis=1)
|
||||||
|
|
||||||
|
print("mse.shape:", mse.shape)
|
||||||
|
# mse=np.mean((predicted_data-label)**2,axis=1)
|
||||||
|
# print("mse", mse)
|
||||||
|
mseList.append(mse)
|
||||||
|
if isStandard:
|
||||||
|
dims, = mse.shape
|
||||||
|
mean = np.mean(mse)
|
||||||
|
std = np.sqrt(np.var(mse))
|
||||||
|
max = mean + 3 * std
|
||||||
|
print("max.shape:", max.shape)
|
||||||
|
# min = mean-3*std
|
||||||
|
max = np.broadcast_to(max, shape=[dims, ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
mean = np.broadcast_to(mean, shape=[dims, ])
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(max)
|
||||||
|
plt.plot(mse)
|
||||||
|
plt.plot(mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
maxList.append(max)
|
||||||
|
meanList.append(mean)
|
||||||
|
else:
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(mse)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
return mseList, meanList, maxList
|
||||||
|
# pass
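
# Sketch only (not called): the per-branch score built above is not a plain MSE but a
# standardized squared error: for each time step the absolute residual is z-scored per
# feature (using the mean and std of the residuals over time) and the squares are summed
# over features, with the alarm threshold set at mean + 3 * std of that score. The compact
# form below reproduces the same computation for one prediction/label pair.
def _demo_health_index(predict_data, label):
    residual = np.abs(predict_data - label)                        # (N, features)
    z = (residual - residual.mean(axis=0)) / np.sqrt(residual.var(axis=0))
    score = np.sum(z ** 2, axis=1)                                 # (N,)
    threshold = score.mean() + 3 * np.sqrt(score.var())
    return score, threshold
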
|
||||||
|
|
||||||
|
|
||||||
|
# healthy_data is the healthy data used to set the threshold; all_data is the full data fed to the model to produce results
|
||||||
|
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
|
||||||
|
isSave: bool = False, predictI: int = 1):
|
||||||
|
    # TODO: compute the MSE-style health index to determine the threshold
|
||||||
|
# plt.ion()
|
||||||
|
mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
|
||||||
|
mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
|
||||||
|
|
||||||
|
for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
|
||||||
|
|
||||||
|
        # False-alarm rate
|
||||||
|
total, = mse.shape
|
||||||
|
faultNum = 0
|
||||||
|
faultList = []
|
||||||
|
for i in range(total):
|
||||||
|
if (mse[i] > max[i]):
|
||||||
|
faultNum += 1
|
||||||
|
faultList.append(mse[i])
|
||||||
|
|
||||||
|
fault_rate = faultNum / total
|
||||||
|
print("误报率:", fault_rate)
|
||||||
|
|
||||||
|
        # Missed-alarm rate
|
||||||
|
missNum = 0
|
||||||
|
missList = []
|
||||||
|
all, = mse1.shape
|
||||||
|
for i in range(all):
|
||||||
|
if (mse1[i] < max[0]):
|
||||||
|
missNum += 1
|
||||||
|
missList.append(mse1[i])
|
||||||
|
|
||||||
|
miss_rate = missNum / all
|
||||||
|
print("漏报率:", miss_rate)
|
||||||
|
|
||||||
|
        # Overall plot
|
||||||
|
print("mse:", mse)
|
||||||
|
print("mse1:", mse1)
|
||||||
|
print("============================================")
|
||||||
|
total_mse = np.concatenate([mse, mse1], axis=0)
|
||||||
|
total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name1,total_mse, delimiter=',')
|
||||||
|
np.savetxt(save_max_name1,total_max, delimiter=',')
|
||||||
|
|
||||||
|
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(total_max)
|
||||||
|
plt.plot(total_mse)
|
||||||
|
plt.plot(total_mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
total_data = loadData.execute(N=feature_num, file_name=file_name)
|
||||||
|
total_data = normalization(data=total_data)
|
||||||
|
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
|
||||||
|
total_data[:healthy_date, :], is_Healthy=True)
|
||||||
|
train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
|
||||||
|
total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
|
||||||
|
is_Healthy=False)
|
||||||
|
    #### TODO Step-one training
|
||||||
|
    # Single quick run
|
||||||
|
# train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
|
||||||
|
# train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
|
||||||
|
|
||||||
|
    # Load the step-one trained model: one copy keeps training, the other only produces outputs
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# # step_one_model.load_weights(save_name)
|
||||||
|
# #
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.load_weights(save_name)
|
||||||
|
|
||||||
|
    #### TODO Step-two training
|
||||||
|
### healthy_data.shape: (300333,120,10)
|
||||||
|
### unhealthy_data.shape: (16594,10)
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
|
||||||
|
healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
|
||||||
|
healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||||
|
healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
|
||||||
|
unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
|
||||||
|
# train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
|
||||||
|
# train_data=train_data,
|
||||||
|
# train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
|
||||||
|
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
# all_data, _, _ = get_training_data_overlapping(
|
||||||
|
# total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
|
||||||
|
    ## Single quick run to produce results
|
||||||
|
# getResult(step_one_model,
|
||||||
|
# healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
|
||||||
|
# :],
|
||||||
|
# healthy_label=train_label1_healthy[
|
||||||
|
# healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
|
||||||
|
# unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
|
||||||
|
|
||||||
|
    ### TODO Evaluate on the test set
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# step_one_model.load_weights(save_name)
|
||||||
|
step_two_model = Joint_Monitoring()
|
||||||
|
step_two_model.load_weights(save_step_two_name)
|
||||||
|
# test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
|
||||||
|
# test_label2=np.expand_dims(test_label2, axis=-1))
|
||||||
|
|
||||||
|
    ### TODO Show the full results
|
||||||
|
all_data, _, _ = get_training_data_overlapping(
|
||||||
|
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
# all_data = np.concatenate([])
|
||||||
|
    # Single quick run
|
||||||
|
# showResult(step_two_model, test_data=all_data[:32], isPlot=True)
|
||||||
|
showResult(step_two_model, test_data=all_data, isPlot=True)
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
@ -0,0 +1,714 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/10/11 18:55
|
||||||
|
@Usage :
|
||||||
|
@Desc   : Direct classification with RNet
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.Joint_Monitoring.compare.RNet_45 import Joint_Monitoring
|
||||||
|
|
||||||
|
from model.CommonFunction.CommonFunction import *
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from tensorflow.keras.models import load_model, save_model
|
||||||
|
import random
|
||||||
|
|
||||||
|
'''Hyperparameter settings'''
|
||||||
|
time_stamp = 120
|
||||||
|
feature_num = 10
|
||||||
|
batch_size = 16
|
||||||
|
learning_rate = 0.001
|
||||||
|
EPOCH = 101
|
||||||
|
model_name = "RNet_C"
|
||||||
|
'''EWMA hyperparameters'''
|
||||||
|
K = 18
|
||||||
|
namuda = 0.01
|
||||||
|
'''Save paths'''
|
||||||
|
|
||||||
|
save_name = "./model/weight/{0}/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
'''Data file path'''
|
||||||
|
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
|
||||||
|
|
||||||
|
'''
|
||||||
|
File notes: jb4q_8_delete_total_zero.csv is the export with only the all-zero columns removed.
Rows 0:415548 are healthy data (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
Rows 415549:432153 are unhealthy data (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
|
||||||
|
'''
|
||||||
|
'''Data file parameters'''
|
||||||
|
# Last healthy time index (row)
|
||||||
|
healthy_date = 415548
|
||||||
|
# Last unhealthy time index (row)
|
||||||
|
unhealthy_date = 432153
|
||||||
|
# Abnormality tolerance (offset used when slicing the unhealthy segment)
|
||||||
|
unhealthy_patience = 5
|
||||||
|
|
||||||
|
|
||||||
|
# Plotting settings
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # axis-label font family and size
|
||||||
|
|
||||||
|
|
||||||
|
def remove(data, time_stamp=time_stamp):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("remove_data.shape:", data.shape)
|
||||||
|
num = int(rows / time_stamp)
|
||||||
|
|
||||||
|
return data[:num * time_stamp, :]
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# Non-overlapping sampling
|
||||||
|
def get_training_data(data, time_stamp: int = time_stamp):
|
||||||
|
removed_data = remove(data=data)
|
||||||
|
rows, cols = removed_data.shape
|
||||||
|
print("removed_data.shape:", data.shape)
|
||||||
|
print("removed_data:", removed_data)
|
||||||
|
train_data = np.reshape(removed_data, [-1, time_stamp, cols])
|
||||||
|
print("train_data:", train_data)
|
||||||
|
batchs, time_stamp, cols = train_data.shape
|
||||||
|
|
||||||
|
for i in range(1, batchs):
|
||||||
|
each_label = np.expand_dims(train_data[i, 0, :], axis=0)
|
||||||
|
if i == 1:
|
||||||
|
train_label = each_label
|
||||||
|
else:
|
||||||
|
train_label = np.concatenate([train_label, each_label], axis=0)
|
||||||
|
|
||||||
|
print("train_data.shape:", train_data.shape)
|
||||||
|
print("train_label.shape", train_label.shape)
|
||||||
|
return train_data[:-1, :], train_label
|
||||||
|
|
||||||
|
|
||||||
|
# Overlapping (sliding-window) sampling
|
||||||
|
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
|
||||||
|
rows, cols = data.shape
|
||||||
|
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
|
||||||
|
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
|
||||||
|
for i in range(rows):
|
||||||
|
if i + time_stamp >= rows:
|
||||||
|
break
|
||||||
|
if i + time_stamp < rows - 1:
|
||||||
|
train_data[i] = data[i:i + time_stamp]
|
||||||
|
train_label[i] = data[i + time_stamp]
|
||||||
|
|
||||||
|
print("重叠采样以后:")
|
||||||
|
print("data:", train_data) # (300334,120,10)
|
||||||
|
print("label:", train_label) # (300334,10)
|
||||||
|
|
||||||
|
if is_Healthy:
|
||||||
|
train_label2 = np.ones(shape=[train_label.shape[0]])
|
||||||
|
else:
|
||||||
|
train_label2 = np.zeros(shape=[train_label.shape[0]])
|
||||||
|
|
||||||
|
print("label2:", train_label2)
|
||||||
|
|
||||||
|
return train_data, train_label, train_label2
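
# Sketch only (not called): an equivalent, loop-free construction of the same overlapping
# windows using numpy. sliding_window_view requires numpy >= 1.20; this is an alternative
# formulation for reference, not the function used by the rest of the script.
def _demo_overlapping_windows(data, time_stamp=time_stamp):
    from numpy.lib.stride_tricks import sliding_window_view
    rows, cols = data.shape
    n = rows - time_stamp - 1
    # windows i = 0 .. n-1, each of length time_stamp, labelled by the sample right after it
    windows = sliding_window_view(data, time_stamp, axis=0)[:n]      # (n, cols, time_stamp)
    train_data = np.transpose(windows, (0, 2, 1))                    # (n, time_stamp, cols)
    train_label = data[time_stamp:time_stamp + n]                    # (n, cols)
    return train_data, train_label
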
|
||||||
|
|
||||||
|
|
||||||
|
# RepConv: re-parameterizable convolution block (kxk conv + 1x1 conv + identity, each followed by BN)
|
||||||
|
def RepConv(input_tensor, k=3):
|
||||||
|
_, _, output_dim = input_tensor.shape
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
|
||||||
|
conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(input_tensor)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
return out
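
# Sketch only (not called): the point of this RepVGG-style block is that, ignoring the
# BatchNormalization layers for brevity, the three parallel branches can be folded into a
# single k-tap convolution at inference time, because convolution is linear: pad the 1x1
# kernel to length k with zeros, express the identity branch as a Dirac kernel (only valid
# when input and output channel counts match), and sum the kernels. Kernels are assumed to
# be numpy arrays in Keras Conv1D layout (k, in_channels, out_channels).
def _demo_fuse_repconv_kernels(k3_kernel, k1_kernel):
    k, cin, cout = k3_kernel.shape
    padded_1x1 = np.zeros_like(k3_kernel)
    padded_1x1[k // 2] = k1_kernel[0]                 # centre the 1x1 kernel inside k taps
    identity = np.zeros_like(k3_kernel)
    for c in range(min(cin, cout)):
        identity[k // 2, c, c] = 1.0                  # identity branch as a Dirac kernel
    return k3_kernel + padded_1x1 + identity
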
|
||||||
|
|
||||||
|
|
||||||
|
# RepBlock: a stack of RepConv blocks
|
||||||
|
def RepBlock(input_tensor, num: int = 3):
|
||||||
|
for i in range(num):
|
||||||
|
input_tensor = RepConv(input_tensor)
|
||||||
|
return input_tensor
|
||||||
|
|
||||||
|
|
||||||
|
# GAP: global average pooling channel attention
|
||||||
|
def Global_avg_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# GDP: global dynamic pooling channel attention
|
||||||
|
def Global_Dynamic_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
|
||||||
|
# GAP
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
|
||||||
|
# GMP
|
||||||
|
GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
|
||||||
|
c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
|
||||||
|
s3 = tf.nn.sigmoid(c2)
|
||||||
|
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
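
# Sketch only (not called): as written above, the GMP gate (s3) is computed but never
# applied; only the GAP gate s1 scales the input. If the intent was to use both pooled
# descriptors, one common pattern is to pass both through a shared 1x1 conv and add them
# before the sigmoid. This is an assumption about the intended design, not a drop-in
# replacement for the function above.
def _demo_dynamic_channel_attention(input_tensor):
    _, length, channel = input_tensor.shape
    dwc = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
    gap = tf.reduce_mean(dwc, axis=1, keepdims=True)   # (batch, 1, channel)
    gmp = tf.reduce_max(dwc, axis=1, keepdims=True)    # (batch, 1, channel)
    conv = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')
    gate = tf.nn.sigmoid(conv(gap) + conv(gmp))        # broadcasts over the time axis
    return tf.multiply(input_tensor, gate)
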
|
||||||
|
|
||||||
|
|
||||||
|
# Min-max normalization
|
||||||
|
def normalization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("归一化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
max = np.max(data, axis=0)
|
||||||
|
max = np.broadcast_to(max, [rows, cols])
|
||||||
|
min = np.min(data, axis=0)
|
||||||
|
min = np.broadcast_to(min, [rows, cols])
|
||||||
|
|
||||||
|
data = (data - min) / (max - min)
|
||||||
|
print("归一化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
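
# Sketch only (not called): min-max scaling divides by (max - min), which is zero for any
# constant column and would produce NaN/inf there. The guarded variant below leaves such
# columns at zero; it is an alternative for reference, not the function used elsewhere.
def _demo_normalization_safe(data):
    col_min = np.min(data, axis=0)
    col_range = np.max(data, axis=0) - col_min
    col_range = np.where(col_range == 0, 1.0, col_range)   # avoid division by zero
    return (data - col_min) / col_range
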
|
||||||
|
|
||||||
|
|
||||||
|
# Standardization (z-score scaling)
|
||||||
|
def Regularization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("正则化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 正则化
|
||||||
|
mean = np.mean(data, axis=0)
|
||||||
|
mean = np.broadcast_to(mean, shape=[rows, cols])
|
||||||
|
dst = np.sqrt(np.var(data, axis=0))
|
||||||
|
dst = np.broadcast_to(dst, shape=[rows, cols])
|
||||||
|
data = (data - mean) / dst
|
||||||
|
print("正则化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def EWMA(data, K=K, namuda=namuda):
|
||||||
|
    # What t should be is currently unclear; with t = 0 the expression below reduces to the asymptotic EWMA limit
|
||||||
|
t = 0
|
||||||
|
mid = np.mean(data, axis=0)
|
||||||
|
standard = np.sqrt(np.var(data, axis=0))
|
||||||
|
UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
return mid, UCL, LCL
|
||||||
|
pass
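
# Sketch only (not called): in a standard EWMA control chart the limits are time-varying,
# UCL_t = mu + K * sigma * sqrt(lambda / (2 - lambda) * (1 - (1 - lambda) ** (2 * t))),
# and converge to mu +/- K * sigma * sqrt(lambda / (2 - lambda)) for large t; with t = 0
# the function above effectively returns that asymptotic limit. The helper below computes
# the textbook time-varying limits for t = 1..n. Treat the choice of t as an assumption,
# not as the formula the rest of this script relies on.
def _demo_ewma_limits(data, K=K, namuda=namuda):
    mid = np.mean(data, axis=0)
    sigma = np.sqrt(np.var(data, axis=0))
    t = np.arange(1, data.shape[0] + 1)[:, None]                 # time index per sample
    width = K * sigma * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** (2 * t)))
    return mid + width, mid - width                              # (UCL_t, LCL_t)
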
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def condition_monitoring_model():
|
||||||
|
input = tf.keras.Input(shape=[time_stamp, feature_num])
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
|
||||||
|
GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
|
||||||
|
d1 = tf.keras.layers.Dense(300)(GRU1)
|
||||||
|
output = tf.keras.layers.Dense(10)(d1)
|
||||||
|
|
||||||
|
model = tf.keras.Model(inputs=input, outputs=output)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
# train_data: (300455, 120, 10)
# train_label1: (300455, 10)
# train_label2: (300455,)
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||||
|
train_label1,
|
||||||
|
train_label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=True,
|
||||||
|
random_state=100)
|
||||||
|
if is_split:
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||||
|
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||||
|
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||||
|
split_size: float = 0.2, shuffle: bool = True):
|
||||||
|
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||||
|
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||||
|
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||||
|
label1,
|
||||||
|
label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=shuffle,
|
||||||
|
random_state=100)
|
||||||
|
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# train_data: (300455, 120, 10)
# train_label1: (300455, 10)
# train_label2: (300455,)
def train_step_one(train_data, train_label1, train_label2):
|
||||||
|
model = Joint_Monitoring()
|
||||||
|
# # # # TODO 需要运行编译一次,才能打印model.summary()
|
||||||
|
# model.build(input_shape=(batch_size, filter_num, dims))
|
||||||
|
# model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=True)
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
|
||||||
|
is_first_time=True)
|
||||||
|
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
print('Training loss is :', loss_value.numpy())
|
||||||
|
print('Validating loss is :', val_loss.numpy())
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
|
||||||
|
# step_two_model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
history_accuracy = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
accuracy_num = 0
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
# output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
|
||||||
|
loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=False)
|
||||||
|
accuracy_num += accuracy_value
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
|
||||||
|
accuracy_num / ((z + 1) * batch_size))
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
|
||||||
|
val_label2=val_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
|
||||||
|
accuracy_value=val_accuracy)
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
history_accuracy.append(val_accuracy)
|
||||||
|
print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
|
||||||
|
accuracy_num / ((z + 1) * batch_size)))
|
||||||
|
print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
|
||||||
|
val_label2=test_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
print("val_accuracy:", val_accuracy)
|
||||||
|
print("val_loss:", val_loss)
|
||||||
|
|
||||||
|
|
||||||
|
def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False,isSave:bool=True):
|
||||||
|
# 获取模型的所有参数的个数
|
||||||
|
# step_two_model.count_params()
|
||||||
|
total_result = []
|
||||||
|
size, length, dims = test_data.shape
|
||||||
|
for epoch in range(0, size - batch_size + 1, batch_size):
|
||||||
|
each_test_data = test_data[epoch:epoch + batch_size, :, :]
|
||||||
|
_, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
|
||||||
|
total_result.append(output4)
|
||||||
|
total_result = np.reshape(total_result, [total_result.__len__(), -1])
|
||||||
|
total_result = np.reshape(total_result, [-1, ])
|
||||||
|
|
||||||
|
#误报率,漏报率,准确性的计算
|
||||||
|
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name, total_result, delimiter=',')
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(1, figsize=(6.0, 2.68))
|
||||||
|
plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
|
||||||
|
hspace=None)
|
||||||
|
plt.tight_layout()
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10} # 设置坐标标签的字体大小,字体
|
||||||
|
|
||||||
|
plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
|
||||||
|
# 画出 y=1 这条水平线
|
||||||
|
plt.axhline(0.5, c='red', label='Failure threshold')
|
||||||
|
# 箭头指向上面的水平线
|
||||||
|
# plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
|
||||||
|
# alpha=0.9, overhang=0.5)
|
||||||
|
plt.text(test_data.shape[0] * 2 / 3+1000, 0.7, "Truth Fault", fontsize=10, color='red', verticalalignment='top')
|
||||||
|
plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
|
||||||
|
        plt.xticks(range(5), ('06/09/17', '12/09/17', '18/09/17', '24/09/17', '29/09/17'))  # x-axis tick positions and labels (5 labels, so 5 positions)
|
||||||
|
plt.tick_params() #设置轴显示
|
||||||
|
plt.xlabel("time",fontdict=font1)
|
||||||
|
        plt.ylabel("confidence", fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
|
||||||
|
plt.grid()
|
||||||
|
# plt.ylim(0, 1)
|
||||||
|
# plt.xlim(-50, 1300)
|
||||||
|
# plt.legend("", loc='upper left')
|
||||||
|
plt.show()
|
||||||
|
return total_result
|
||||||
|
|
||||||
|
|
||||||
|
def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
|
||||||
|
predicted_data1 = []
|
||||||
|
predicted_data2 = []
|
||||||
|
predicted_data3 = []
|
||||||
|
size, length, dims = data.shape
|
||||||
|
for epoch in range(0, size, batch_size):
|
||||||
|
each_test_data = data[epoch:epoch + batch_size, :, :]
|
||||||
|
output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
|
||||||
|
if epoch == 0:
|
||||||
|
predicted_data1 = output1
|
||||||
|
predicted_data2 = output2
|
||||||
|
predicted_data3 = output3
|
||||||
|
else:
|
||||||
|
predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
|
||||||
|
predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
|
||||||
|
predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
|
||||||
|
|
||||||
|
predicted_data1 = np.reshape(predicted_data1, [-1, 10])
|
||||||
|
predicted_data2 = np.reshape(predicted_data2, [-1, 10])
|
||||||
|
predicted_data3 = np.reshape(predicted_data3, [-1, 10])
|
||||||
|
predict_data = 0
|
||||||
|
|
||||||
|
predict_data = predicted_data1
|
||||||
|
mseList = []
|
||||||
|
meanList = []
|
||||||
|
maxList = []
|
||||||
|
|
||||||
|
for i in range(1, 4):
|
||||||
|
print("i:", i)
|
||||||
|
if i == 1:
|
||||||
|
predict_data = predicted_data1
|
||||||
|
elif i == 2:
|
||||||
|
predict_data = predicted_data2
|
||||||
|
elif i == 3:
|
||||||
|
predict_data = predicted_data3
|
||||||
|
temp = np.abs(predict_data - label)
|
||||||
|
temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
|
||||||
|
temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
|
||||||
|
temp3 = temp1 / temp2
|
||||||
|
mse = np.sum((temp1 / temp2) ** 2, axis=1)
|
||||||
|
|
||||||
|
print("mse.shape:", mse.shape)
|
||||||
|
# mse=np.mean((predicted_data-label)**2,axis=1)
|
||||||
|
# print("mse", mse)
|
||||||
|
mseList.append(mse)
|
||||||
|
if isStandard:
|
||||||
|
dims, = mse.shape
|
||||||
|
mean = np.mean(mse)
|
||||||
|
std = np.sqrt(np.var(mse))
|
||||||
|
max = mean + 3 * std
|
||||||
|
print("max.shape:", max.shape)
|
||||||
|
# min = mean-3*std
|
||||||
|
max = np.broadcast_to(max, shape=[dims, ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
mean = np.broadcast_to(mean, shape=[dims, ])
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(max)
|
||||||
|
plt.plot(mse)
|
||||||
|
plt.plot(mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
maxList.append(max)
|
||||||
|
meanList.append(mean)
|
||||||
|
else:
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(mse)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
return mseList, meanList, maxList
|
||||||
|
# pass
|
||||||
|
|
||||||
|
|
||||||
|
# healthy_data是健康数据,用于确定阈值,all_data是完整的数据,用于模型出结果
|
||||||
|
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
|
||||||
|
isSave: bool = False, predictI: int = 1):
|
||||||
|
# TODO 计算MSE确定阈值
|
||||||
|
# plt.ion()
|
||||||
|
mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
|
||||||
|
mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
|
||||||
|
|
||||||
|
for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
|
||||||
|
|
||||||
|
# 误报率的计算
|
||||||
|
total, = mse.shape
|
||||||
|
faultNum = 0
|
||||||
|
faultList = []
|
||||||
|
for i in range(total):
|
||||||
|
if (mse[i] > max[i]):
|
||||||
|
faultNum += 1
|
||||||
|
faultList.append(mse[i])
|
||||||
|
|
||||||
|
fault_rate = faultNum / total
|
||||||
|
print("误报率:", fault_rate)
|
||||||
|
|
||||||
|
# 漏报率计算
|
||||||
|
missNum = 0
|
||||||
|
missList = []
|
||||||
|
all, = mse1.shape
|
||||||
|
for i in range(all):
|
||||||
|
if (mse1[i] < max[0]):
|
||||||
|
missNum += 1
|
||||||
|
missList.append(mse1[i])
|
||||||
|
|
||||||
|
miss_rate = missNum / all
|
||||||
|
print("漏报率:", miss_rate)
|
||||||
|
|
||||||
|
# 总体图
|
||||||
|
print("mse:", mse)
|
||||||
|
print("mse1:", mse1)
|
||||||
|
print("============================================")
|
||||||
|
total_mse = np.concatenate([mse, mse1], axis=0)
|
||||||
|
total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name1,total_mse, delimiter=',')
|
||||||
|
np.savetxt(save_max_name1,total_max, delimiter=',')
|
||||||
|
|
||||||
|
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(total_max)
|
||||||
|
plt.plot(total_mse)
|
||||||
|
plt.plot(total_mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
total_data = loadData.execute(N=feature_num, file_name=file_name)
|
||||||
|
total_data = normalization(data=total_data)
|
||||||
|
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
|
||||||
|
total_data[:healthy_date, :], is_Healthy=True)
|
||||||
|
train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
|
||||||
|
total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
|
||||||
|
is_Healthy=False)
|
||||||
|
#### TODO 第一步训练
|
||||||
|
# 单次测试
|
||||||
|
# train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
|
||||||
|
# train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
|
||||||
|
|
||||||
|
# 导入第一步已经训练好的模型,一个继续训练,一个只输出结果
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# # step_one_model.load_weights(save_name)
|
||||||
|
# #
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.load_weights(save_name)
|
||||||
|
|
||||||
|
#### TODO 第二步训练
|
||||||
|
### healthy_data.shape: (300333,120,10)
|
||||||
|
### unhealthy_data.shape: (16594,10)
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
|
||||||
|
healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
|
||||||
|
healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||||
|
healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
|
||||||
|
unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
|
||||||
|
# train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
|
||||||
|
# train_data=train_data,
|
||||||
|
# train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
|
||||||
|
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
# all_data, _, _ = get_training_data_overlapping(
|
||||||
|
# total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
|
||||||
|
##出结果单次测试
|
||||||
|
# getResult(step_one_model,
|
||||||
|
# healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
|
||||||
|
# :],
|
||||||
|
# healthy_label=train_label1_healthy[
|
||||||
|
# healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
|
||||||
|
# unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
|
||||||
|
|
||||||
|
### TODO 测试测试集
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# step_one_model.load_weights(save_name)
|
||||||
|
step_two_model = Joint_Monitoring()
|
||||||
|
step_two_model.load_weights(save_step_two_name)
|
||||||
|
# test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
|
||||||
|
# test_label2=np.expand_dims(test_label2, axis=-1))
|
||||||
|
|
||||||
|
###TODO 展示全部的结果
|
||||||
|
all_data, _, _ = get_training_data_overlapping(
|
||||||
|
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
# all_data = np.concatenate([])
|
||||||
|
# 单次测试
|
||||||
|
# showResult(step_two_model, test_data=all_data[:32], isPlot=True)
|
||||||
|
showResult(step_two_model, test_data=all_data, isPlot=True)
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
@ -0,0 +1,714 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/10/11 18:55
|
||||||
|
@Usage :
|
||||||
|
@Desc : RNet直接进行分类
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.Joint_Monitoring.compare.RNet_5 import Joint_Monitoring
|
||||||
|
|
||||||
|
from model.CommonFunction.CommonFunction import *
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from tensorflow.keras.models import load_model, save_model
|
||||||
|
import random
|
||||||
|
|
||||||
|
'''超参数设置'''
|
||||||
|
time_stamp = 120
|
||||||
|
feature_num = 10
|
||||||
|
batch_size = 16
|
||||||
|
learning_rate = 0.001
|
||||||
|
EPOCH = 101
|
||||||
|
model_name = "RNet_C"
|
||||||
|
'''EWMA超参数'''
|
||||||
|
K = 18
|
||||||
|
namuda = 0.01
|
||||||
|
'''保存名称'''
|
||||||
|
|
||||||
|
save_name = "./model/weight/{0}/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
'''文件名'''
|
||||||
|
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
|
||||||
|
|
||||||
|
'''
|
||||||
|
文件说明:jb4q_8_delete_total_zero.csv是删除了只删除了全是0的列的文件
|
||||||
|
文件从0:415548行均是正常值(2019/7.30 00:00:00 - 2019/9/18 11:14:00)
|
||||||
|
从415549:432153行均是异常值(2019/9/18 11:21:01 - 2021/1/18 00:00:00)
|
||||||
|
'''
|
||||||
|
'''文件参数'''
|
||||||
|
# 最后正常的时间点
|
||||||
|
healthy_date = 415548
|
||||||
|
# 最后异常的时间点
|
||||||
|
unhealthy_date = 432153
|
||||||
|
# 异常容忍程度
|
||||||
|
unhealthy_patience = 5
|
||||||
|
|
||||||
|
|
||||||
|
# 画图相关设置
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10} # 设置坐标标签的字体大小,字体
|
||||||
|
|
||||||
|
|
||||||
|
def remove(data, time_stamp=time_stamp):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("remove_data.shape:", data.shape)
|
||||||
|
num = int(rows / time_stamp)
|
||||||
|
|
||||||
|
return data[:num * time_stamp, :]
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# 不重叠采样
|
||||||
|
def get_training_data(data, time_stamp: int = time_stamp):
|
||||||
|
removed_data = remove(data=data)
|
||||||
|
rows, cols = removed_data.shape
|
||||||
|
print("removed_data.shape:", data.shape)
|
||||||
|
print("removed_data:", removed_data)
|
||||||
|
train_data = np.reshape(removed_data, [-1, time_stamp, cols])
|
||||||
|
print("train_data:", train_data)
|
||||||
|
batchs, time_stamp, cols = train_data.shape
|
||||||
|
|
||||||
|
for i in range(1, batchs):
|
||||||
|
each_label = np.expand_dims(train_data[i, 0, :], axis=0)
|
||||||
|
if i == 1:
|
||||||
|
train_label = each_label
|
||||||
|
else:
|
||||||
|
train_label = np.concatenate([train_label, each_label], axis=0)
|
||||||
|
|
||||||
|
print("train_data.shape:", train_data.shape)
|
||||||
|
print("train_label.shape", train_label.shape)
|
||||||
|
return train_data[:-1, :], train_label
|
||||||
|
|
||||||
|
|
||||||
|
# 重叠采样
|
||||||
|
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
|
||||||
|
rows, cols = data.shape
|
||||||
|
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
|
||||||
|
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
|
||||||
|
for i in range(rows):
|
||||||
|
if i + time_stamp >= rows:
|
||||||
|
break
|
||||||
|
if i + time_stamp < rows - 1:
|
||||||
|
train_data[i] = data[i:i + time_stamp]
|
||||||
|
train_label[i] = data[i + time_stamp]
|
||||||
|
|
||||||
|
print("重叠采样以后:")
|
||||||
|
print("data:", train_data) # (300334,120,10)
|
||||||
|
print("label:", train_label) # (300334,10)
|
||||||
|
|
||||||
|
if is_Healthy:
|
||||||
|
train_label2 = np.ones(shape=[train_label.shape[0]])
|
||||||
|
else:
|
||||||
|
train_label2 = np.zeros(shape=[train_label.shape[0]])
|
||||||
|
|
||||||
|
print("label2:", train_label2)
|
||||||
|
|
||||||
|
return train_data, train_label, train_label2
|
||||||
|
|
||||||
|
|
||||||
|
# RepConv重参数化卷积
|
||||||
|
def RepConv(input_tensor, k=3):
|
||||||
|
_, _, output_dim = input_tensor.shape
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
|
||||||
|
conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(input_tensor)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
# RepBlock模块
|
||||||
|
def RepBlock(input_tensor, num: int = 3):
|
||||||
|
for i in range(num):
|
||||||
|
input_tensor = RepConv(input_tensor)
|
||||||
|
return input_tensor
|
||||||
|
|
||||||
|
|
||||||
|
# GAP 全局平均池化
|
||||||
|
def Global_avg_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# GDP 全局动态池化
|
||||||
|
def Global_Dynamic_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
|
||||||
|
# GAP
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
|
||||||
|
# GMP
|
||||||
|
GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
|
||||||
|
c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
|
||||||
|
s3 = tf.nn.sigmoid(c2)
|
||||||
|
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
def normalization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("归一化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
max = np.max(data, axis=0)
|
||||||
|
max = np.broadcast_to(max, [rows, cols])
|
||||||
|
min = np.min(data, axis=0)
|
||||||
|
min = np.broadcast_to(min, [rows, cols])
|
||||||
|
|
||||||
|
data = (data - min) / (max - min)
|
||||||
|
print("归一化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
# 正则化
|
||||||
|
def Regularization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("正则化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 正则化
|
||||||
|
mean = np.mean(data, axis=0)
|
||||||
|
mean = np.broadcast_to(mean, shape=[rows, cols])
|
||||||
|
dst = np.sqrt(np.var(data, axis=0))
|
||||||
|
dst = np.broadcast_to(dst, shape=[rows, cols])
|
||||||
|
data = (data - mean) / dst
|
||||||
|
print("正则化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def EWMA(data, K=K, namuda=namuda):
|
||||||
|
# t是啥暂时未知
|
||||||
|
t = 0
|
||||||
|
mid = np.mean(data, axis=0)
|
||||||
|
standard = np.sqrt(np.var(data, axis=0))
|
||||||
|
UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
return mid, UCL, LCL
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def condition_monitoring_model():
|
||||||
|
input = tf.keras.Input(shape=[time_stamp, feature_num])
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
|
||||||
|
GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
|
||||||
|
d1 = tf.keras.layers.Dense(300)(GRU1)
|
||||||
|
output = tf.keras.layers.Dense(10)(d1)
|
||||||
|
|
||||||
|
model = tf.keras.Model(inputs=input, outputs=output)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
# train_data: (300455, 120, 10)
# train_label1: (300455, 10)
# train_label2: (300455,)
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||||
|
train_label1,
|
||||||
|
train_label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=True,
|
||||||
|
random_state=100)
|
||||||
|
if is_split:
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||||
|
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||||
|
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||||
|
split_size: float = 0.2, shuffle: bool = True):
|
||||||
|
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||||
|
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||||
|
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||||
|
label1,
|
||||||
|
label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=shuffle,
|
||||||
|
random_state=100)
|
||||||
|
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# train_data:(300455,120,10)
# train_label1:(300455,10)
# train_label2:(300455,)
|
||||||
|
def train_step_one(train_data, train_label1, train_label2):
|
||||||
|
model = Joint_Monitoring()
|
||||||
|
# TODO the model has to be built and run once before model.summary() can be printed
|
||||||
|
# model.build(input_shape=(batch_size, filter_num, dims))
|
||||||
|
# model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# z tracks which training step of the current epoch this is
|
||||||
|
z = 0
|
||||||
|
# k counts samples so that a training step runs once every batch_size samples
|
||||||
|
k = 1
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=True)
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
|
||||||
|
is_first_time=True)
|
||||||
|
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
print('Training loss is :', loss_value.numpy())
|
||||||
|
print('Validating loss is :', val_loss.numpy())
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
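# train_step_one() above assembles mini-batches by hand with tf.concat and the k/z counters.
# A sketch of the same batching done with tf.data (an alternative, not the original pipeline):
def make_batches(train_data, train_label1, train_label2, batch=batch_size):
    ds = tf.data.Dataset.from_tensor_slices((train_data, train_label1, train_label2))
    ds = ds.shuffle(buffer_size=1024).batch(batch, drop_remainder=True)
    return ds  # iterate as: for data, label1, label2 in ds: ... model.train(...)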
|
||||||
|
|
||||||
|
|
||||||
|
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
|
||||||
|
# step_two_model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
history_accuracy = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# z tracks which training step of the current epoch this is
|
||||||
|
z = 0
|
||||||
|
# k counts samples so that a training step runs once every batch_size samples
|
||||||
|
k = 1
|
||||||
|
accuracy_num = 0
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
# output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
|
||||||
|
loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=False)
|
||||||
|
accuracy_num += accuracy_value
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
|
||||||
|
accuracy_num / ((z + 1) * batch_size))
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
|
||||||
|
val_label2=val_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
|
||||||
|
accuracy_value=val_accuracy)
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
history_accuracy.append(val_accuracy)
|
||||||
|
print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
|
||||||
|
accuracy_num / ((z + 1) * batch_size)))
|
||||||
|
print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
|
||||||
|
val_label2=test_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
print("val_accuracy:", val_accuracy)
|
||||||
|
print("val_loss:", val_loss)
|
||||||
|
|
||||||
|
|
||||||
|
def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False,isSave:bool=True):
|
||||||
|
# get the total number of parameters in the model
|
||||||
|
# step_two_model.count_params()
|
||||||
|
total_result = []
|
||||||
|
size, length, dims = test_data.shape
|
||||||
|
for epoch in range(0, size - batch_size + 1, batch_size):
|
||||||
|
each_test_data = test_data[epoch:epoch + batch_size, :, :]
|
||||||
|
_, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
|
||||||
|
total_result.append(output4)
|
||||||
|
total_result = np.reshape(total_result, [total_result.__len__(), -1])
|
||||||
|
total_result = np.reshape(total_result, [-1, ])
|
||||||
|
|
||||||
|
# TODO false alarm rate, missed alarm rate and accuracy computation (see the sketch after this function)
|
||||||
|
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name, total_result, delimiter=',')
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(1, figsize=(6.0, 2.68))
|
||||||
|
plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
|
||||||
|
hspace=None)
|
||||||
|
plt.tight_layout()
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # font family and size for axis labels
|
||||||
|
|
||||||
|
plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
|
||||||
|
# draw the horizontal failure-threshold line at y = 0.5
|
||||||
|
plt.axhline(0.5, c='red', label='Failure threshold')
|
||||||
|
# arrow pointing at the horizontal threshold line above (disabled)
|
||||||
|
# plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
|
||||||
|
# alpha=0.9, overhang=0.5)
|
||||||
|
plt.text(test_data.shape[0] * 2 / 3+1000, 0.7, "Truth Fault", fontsize=10, color='red', verticalalignment='top')
|
||||||
|
plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
|
||||||
|
plt.xticks(range(5), ('06/09/17', '12/09/17', '18/09/17', '24/09/17', '29/09/17'))  # x-axis tick labels; five positions to match the five date labels
|
||||||
|
plt.tick_params()  # tick display settings
|
||||||
|
plt.xlabel("time",fontdict=font1)
|
||||||
|
plt.ylabel("confience",fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10},fontdict=font1)
|
||||||
|
|
||||||
|
plt.grid()
|
||||||
|
# plt.ylim(0, 1)
|
||||||
|
# plt.xlim(-50, 1300)
|
||||||
|
# plt.legend("", loc='upper left')
|
||||||
|
plt.show()
|
||||||
|
return total_result
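# A sketch of the false alarm / missed alarm / accuracy computation left as a TODO inside
# showResult(). Assumptions: output4 approximates label2, which get_training_data_overlapping
# sets to 1 for healthy windows, and the first two thirds of total_result (left of the blue
# vertical line in the plot) come from the healthy period.
def alarm_rates_from_scores(total_result, threshold=0.5):
    import numpy as np
    n = total_result.shape[0]
    split = int(n * 2 / 3)
    healthy, faulty = total_result[:split], total_result[split:]
    false_alarm_rate = np.mean(healthy < threshold)    # healthy samples flagged as faulty
    missed_alarm_rate = np.mean(faulty >= threshold)   # faulty samples flagged as healthy
    accuracy = (np.sum(healthy >= threshold) + np.sum(faulty < threshold)) / n
    return false_alarm_rate, missed_alarm_rate, accuracy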
|
||||||
|
|
||||||
|
|
||||||
|
def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
|
||||||
|
predicted_data1 = []
|
||||||
|
predicted_data2 = []
|
||||||
|
predicted_data3 = []
|
||||||
|
size, length, dims = data.shape
|
||||||
|
for epoch in range(0, size, batch_size):
|
||||||
|
each_test_data = data[epoch:epoch + batch_size, :, :]
|
||||||
|
output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
|
||||||
|
if epoch == 0:
|
||||||
|
predicted_data1 = output1
|
||||||
|
predicted_data2 = output2
|
||||||
|
predicted_data3 = output3
|
||||||
|
else:
|
||||||
|
predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
|
||||||
|
predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
|
||||||
|
predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
|
||||||
|
|
||||||
|
predicted_data1 = np.reshape(predicted_data1, [-1, 10])
|
||||||
|
predicted_data2 = np.reshape(predicted_data2, [-1, 10])
|
||||||
|
predicted_data3 = np.reshape(predicted_data3, [-1, 10])
|
||||||
|
predict_data = 0
|
||||||
|
|
||||||
|
predict_data = predicted_data1
|
||||||
|
mseList = []
|
||||||
|
meanList = []
|
||||||
|
maxList = []
|
||||||
|
|
||||||
|
for i in range(1, 4):
|
||||||
|
print("i:", i)
|
||||||
|
if i == 1:
|
||||||
|
predict_data = predicted_data1
|
||||||
|
elif i == 2:
|
||||||
|
predict_data = predicted_data2
|
||||||
|
elif i == 3:
|
||||||
|
predict_data = predicted_data3
|
||||||
|
temp = np.abs(predict_data - label)
|
||||||
|
temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
|
||||||
|
temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
|
||||||
|
temp3 = temp1 / temp2
|
||||||
|
mse = np.sum((temp1 / temp2) ** 2, axis=1)
|
||||||
|
|
||||||
|
print("mse.shape:", mse.shape)
|
||||||
|
# mse=np.mean((predicted_data-label)**2,axis=1)
|
||||||
|
# print("mse", mse)
|
||||||
|
mseList.append(mse)
|
||||||
|
if isStandard:
|
||||||
|
dims, = mse.shape
|
||||||
|
mean = np.mean(mse)
|
||||||
|
std = np.sqrt(np.var(mse))
|
||||||
|
max = mean + 3 * std
|
||||||
|
print("max.shape:", max.shape)
|
||||||
|
# min = mean-3*std
|
||||||
|
max = np.broadcast_to(max, shape=[dims, ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
mean = np.broadcast_to(mean, shape=[dims, ])
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(max)
|
||||||
|
plt.plot(mse)
|
||||||
|
plt.plot(mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
maxList.append(max)
|
||||||
|
meanList.append(mean)
|
||||||
|
else:
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(mse)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
return mseList, meanList, maxList
|
||||||
|
# pass
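# get_MSE() above standardizes the absolute prediction error per channel, sums the squares into
# a health index and thresholds it at mean + 3 * std. A compact standalone sketch of that
# 3-sigma rule on toy residuals:
def three_sigma_threshold_demo():
    import numpy as np
    rng = np.random.default_rng(0)
    residual = np.abs(rng.normal(size=(1000, 10)))                  # stands in for |prediction - label|
    z = (residual - residual.mean(axis=0)) / residual.std(axis=0)   # standardize per channel
    score = np.sum(z ** 2, axis=1)                                  # health index per sample
    threshold = score.mean() + 3 * score.std()                      # 3-sigma control limit
    return score, threshold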
|
||||||
|
|
||||||
|
|
||||||
|
# healthy_data is the healthy data used to set the threshold; all_data is the complete data the model produces results on
|
||||||
|
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
|
||||||
|
isSave: bool = False, predictI: int = 1):
|
||||||
|
# TODO compute the MSE to determine the threshold
|
||||||
|
# plt.ion()
|
||||||
|
mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
|
||||||
|
mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
|
||||||
|
|
||||||
|
for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
|
||||||
|
|
||||||
|
# false alarm rate calculation
|
||||||
|
total, = mse.shape
|
||||||
|
faultNum = 0
|
||||||
|
faultList = []
|
||||||
|
for i in range(total):
|
||||||
|
if (mse[i] > max[i]):
|
||||||
|
faultNum += 1
|
||||||
|
faultList.append(mse[i])
|
||||||
|
|
||||||
|
fault_rate = faultNum / total
|
||||||
|
print("误报率:", fault_rate)
|
||||||
|
|
||||||
|
# missed alarm rate calculation
|
||||||
|
missNum = 0
|
||||||
|
missList = []
|
||||||
|
all, = mse1.shape
|
||||||
|
for i in range(all):
|
||||||
|
if (mse1[i] < max[0]):
|
||||||
|
missNum += 1
|
||||||
|
missList.append(mse1[i])
|
||||||
|
|
||||||
|
miss_rate = missNum / all
|
||||||
|
print("漏报率:", miss_rate)
|
||||||
|
|
||||||
|
# overall plot
|
||||||
|
print("mse:", mse)
|
||||||
|
print("mse1:", mse1)
|
||||||
|
print("============================================")
|
||||||
|
total_mse = np.concatenate([mse, mse1], axis=0)
|
||||||
|
total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name1,total_mse, delimiter=',')
|
||||||
|
np.savetxt(save_max_name1,total_max, delimiter=',')
|
||||||
|
|
||||||
|
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(total_max)
|
||||||
|
plt.plot(total_mse)
|
||||||
|
plt.plot(total_mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
pass
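# The two counting loops in getResult() reduce to vectorized comparisons, since the broadcast
# "max" threshold is a single scalar (max[0]). A minimal equivalent sketch:
def alarm_rates_vectorized(mse, mse1, limit):
    import numpy as np
    false_alarm_rate = np.mean(np.asarray(mse) > limit)    # healthy samples above the threshold
    missed_alarm_rate = np.mean(np.asarray(mse1) < limit)  # faulty samples below the threshold
    return false_alarm_rate, missed_alarm_rate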
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
total_data = loadData.execute(N=feature_num, file_name=file_name)
|
||||||
|
total_data = normalization(data=total_data)
|
||||||
|
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
|
||||||
|
total_data[:healthy_date, :], is_Healthy=True)
|
||||||
|
train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
|
||||||
|
total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
|
||||||
|
is_Healthy=False)
|
||||||
|
#### TODO step-one training
|
||||||
|
# single-run test
|
||||||
|
# train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
|
||||||
|
# train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
|
||||||
|
|
||||||
|
# load the already trained step-one model: one copy keeps training, the other only produces outputs
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# # step_one_model.load_weights(save_name)
|
||||||
|
# #
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.load_weights(save_name)
|
||||||
|
|
||||||
|
#### TODO step-two training
|
||||||
|
### healthy_data.shape: (300333,120,10)
|
||||||
|
### unhealthy_data.shape: (16594,10)
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
|
||||||
|
healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
|
||||||
|
healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||||
|
healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
|
||||||
|
unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
|
||||||
|
# train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
|
||||||
|
# train_data=train_data,
|
||||||
|
# train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
|
||||||
|
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
# all_data, _, _ = get_training_data_overlapping(
|
||||||
|
# total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
|
||||||
|
## single-run test of result generation
|
||||||
|
# getResult(step_one_model,
|
||||||
|
# healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
|
||||||
|
# :],
|
||||||
|
# healthy_label=train_label1_healthy[
|
||||||
|
# healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
|
||||||
|
# unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
|
||||||
|
|
||||||
|
### TODO evaluate on the test set
|
||||||
|
# step_one_model = Joint_Monitoring()
|
||||||
|
# step_one_model.load_weights(save_name)
|
||||||
|
step_two_model = Joint_Monitoring()
|
||||||
|
step_two_model.load_weights(save_step_two_name)
|
||||||
|
# test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
|
||||||
|
# test_label2=np.expand_dims(test_label2, axis=-1))
|
||||||
|
|
||||||
|
### TODO show the full results
|
||||||
|
all_data, _, _ = get_training_data_overlapping(
|
||||||
|
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
# all_data = np.concatenate([])
|
||||||
|
# single-run test
|
||||||
|
# showResult(step_two_model, test_data=all_data[:32], isPlot=True)
|
||||||
|
showResult(step_two_model, test_data=all_data, isPlot=True)
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
@ -675,11 +675,11 @@ if __name__ == '__main__':
|
||||||
### unhealthy_data.shape: (16594,10)
|
### unhealthy_data.shape: (16594,10)
|
||||||
# healthy_size, _, _ = train_data_healthy.shape
|
# healthy_size, _, _ = train_data_healthy.shape
|
||||||
# unhealthy_size, _, _ = train_data_unhealthy.shape
|
# unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
# train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
|
train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
|
||||||
# healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
|
healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
|
||||||
# healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||||
# healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
|
healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
|
||||||
# unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
|
unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
|
||||||
# train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
|
# train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
|
||||||
# train_data=train_data,
|
# train_data=train_data,
|
||||||
# train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
|
# train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,687 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/10/11 18:53
|
||||||
|
@Usage :
|
||||||
|
@Desc : Rnet-SE model comparison
|
||||||
|
'''
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.Joint_Monitoring.compare.RNet_S import Joint_Monitoring
|
||||||
|
|
||||||
|
from model.CommonFunction.CommonFunction import *
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from tensorflow.keras.models import load_model, save_model
|
||||||
|
import random
|
||||||
|
|
||||||
|
'''Hyperparameter settings'''
|
||||||
|
time_stamp = 120
|
||||||
|
feature_num = 10
|
||||||
|
batch_size = 16
|
||||||
|
learning_rate = 0.001
|
||||||
|
EPOCH = 101
|
||||||
|
model_name = "RNet_S"
|
||||||
|
'''EWMA hyperparameters'''
|
||||||
|
K = 18
|
||||||
|
namuda = 0.01
|
||||||
|
'''Save names'''
|
||||||
|
# save_name = "E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\hard_model\weight\joint_timestamp120_feature10_weight_epoch11_0.0077/weight"
|
||||||
|
save_name = "./model/weight/{0}/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_step_two_name = "../hard_model/two_weight/{0}_timestamp{1}_feature{2}_weight_epoch14/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
save_mse_name = "./mse/RNet_S/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_max_name = "./mse/RNet_S/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
|
||||||
|
# time_stamp,
|
||||||
|
# feature_num,
|
||||||
|
# batch_size,
|
||||||
|
# EPOCH)
|
||||||
|
# save_step_two_name = "../model/joint_two/{0}_timestamp{1}_feature{2}.h5".format(model_name,
|
||||||
|
# time_stamp,
|
||||||
|
# feature_num,
|
||||||
|
# batch_size,
|
||||||
|
# EPOCH)
|
||||||
|
'''File name'''
|
||||||
|
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
|
||||||
|
|
||||||
|
'''
|
||||||
|
File notes: jb4q_8_delete_total_zero.csv is the file with only the all-zero columns removed.
Rows 0:415548 are all normal values (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
Rows 415549:432153 are all abnormal values (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
|
||||||
|
'''
|
||||||
|
'''File parameters'''
|
||||||
|
# last normal time point (row index)
|
||||||
|
healthy_date = 415548
|
||||||
|
# last abnormal time point (row index)
|
||||||
|
unhealthy_date = 432153
|
||||||
|
# abnormality tolerance (rows)
|
||||||
|
unhealthy_patience = 5
|
||||||
|
|
||||||
|
|
||||||
|
def remove(data, time_stamp=time_stamp):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("remove_data.shape:", data.shape)
|
||||||
|
num = int(rows / time_stamp)
|
||||||
|
|
||||||
|
return data[:num * time_stamp, :]
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# non-overlapping sampling
|
||||||
|
def get_training_data(data, time_stamp: int = time_stamp):
|
||||||
|
removed_data = remove(data=data)
|
||||||
|
rows, cols = removed_data.shape
|
||||||
|
print("removed_data.shape:", data.shape)
|
||||||
|
print("removed_data:", removed_data)
|
||||||
|
train_data = np.reshape(removed_data, [-1, time_stamp, cols])
|
||||||
|
print("train_data:", train_data)
|
||||||
|
batchs, time_stamp, cols = train_data.shape
|
||||||
|
|
||||||
|
for i in range(1, batchs):
|
||||||
|
each_label = np.expand_dims(train_data[i, 0, :], axis=0)
|
||||||
|
if i == 1:
|
||||||
|
train_label = each_label
|
||||||
|
else:
|
||||||
|
train_label = np.concatenate([train_label, each_label], axis=0)
|
||||||
|
|
||||||
|
print("train_data.shape:", train_data.shape)
|
||||||
|
print("train_label.shape", train_label.shape)
|
||||||
|
return train_data[:-1, :], train_label
|
||||||
|
|
||||||
|
|
||||||
|
# overlapping (sliding-window) sampling
|
||||||
|
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
|
||||||
|
rows, cols = data.shape
|
||||||
|
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
|
||||||
|
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
|
||||||
|
for i in range(rows):
|
||||||
|
if i + time_stamp >= rows:
|
||||||
|
break
|
||||||
|
if i + time_stamp < rows - 1:
|
||||||
|
train_data[i] = data[i:i + time_stamp]
|
||||||
|
train_label[i] = data[i + time_stamp]
|
||||||
|
|
||||||
|
print("重叠采样以后:")
|
||||||
|
print("data:", train_data) # (300334,120,10)
|
||||||
|
print("label:", train_label) # (300334,10)
|
||||||
|
|
||||||
|
if is_Healthy:
|
||||||
|
train_label2 = np.ones(shape=[train_label.shape[0]])
|
||||||
|
else:
|
||||||
|
train_label2 = np.zeros(shape=[train_label.shape[0]])
|
||||||
|
|
||||||
|
print("label2:", train_label2)
|
||||||
|
|
||||||
|
return train_data, train_label, train_label2
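# A vectorized sketch of the same overlapping (sliding-window) sampling, assuming
# numpy >= 1.20 for sliding_window_view. Window i covers rows i .. i+window-1 and its label is
# row i+window, as in the loop above (this version keeps one extra trailing pair that the loop drops).
def sliding_windows(data, window=time_stamp):
    import numpy as np
    views = np.lib.stride_tricks.sliding_window_view(data, window, axis=0)  # (rows-window+1, cols, window)
    windows = views.transpose(0, 2, 1)[:-1]                                 # (rows-window, window, cols)
    labels = data[window:]                                                  # (rows-window, cols)
    return windows, labels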
|
||||||
|
|
||||||
|
|
||||||
|
# RepConv: re-parameterized convolution
|
||||||
|
def RepConv(input_tensor, k=3):
|
||||||
|
_, _, output_dim = input_tensor.shape
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
|
||||||
|
conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(input_tensor)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
return out
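# The point of RepConv is that at inference time the three branches can be merged into a single
# convolution. A minimal sketch of the core step, folding a BatchNormalization layer into the
# preceding Conv1D; it assumes the standard Keras Conv1D kernel layout (kernel_size, in, out)
# and BatchNormalization's default epsilon of 1e-3.
def fold_bn_into_conv1d(kernel, bias, gamma, beta, moving_mean, moving_var, eps=1e-3):
    import numpy as np
    scale = gamma / np.sqrt(moving_var + eps)          # one scale per output channel
    folded_kernel = kernel * scale                     # broadcasts over the last (output) axis
    folded_bias = beta + (bias - moving_mean) * scale
    return folded_kernel, folded_bias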
|
||||||
|
|
||||||
|
|
||||||
|
# RepBlock: a stack of RepConv layers
|
||||||
|
def RepBlock(input_tensor, num: int = 3):
|
||||||
|
for i in range(num):
|
||||||
|
input_tensor = RepConv(input_tensor)
|
||||||
|
return input_tensor
|
||||||
|
|
||||||
|
|
||||||
|
# GAP: channel attention based on global average pooling
|
||||||
|
def Global_avg_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
# GDP: channel attention based on global dynamic pooling
|
||||||
|
def Global_Dynamic_channelAttention(input_tensor):
|
||||||
|
_, length, channel = input_tensor.shape
|
||||||
|
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
|
||||||
|
|
||||||
|
# GAP
|
||||||
|
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
|
||||||
|
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
|
||||||
|
s1 = tf.nn.sigmoid(c1)
|
||||||
|
|
||||||
|
# GMP
|
||||||
|
GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
|
||||||
|
c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
|
||||||
|
s3 = tf.nn.sigmoid(c2)
|
||||||
|
|
||||||
|
# note: s3 from the max-pool branch above is computed but not used; only s1 weights the input
# (see the sketch after this function for one way to combine both branches)
output = tf.multiply(input_tensor, s1)
|
||||||
|
return output
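# One way the unused max-pool branch could be folded in, in the spirit of CBAM-style channel
# attention; this is an illustrative assumption, not the original design of this script.
def dual_pool_channel_attention(input_tensor):
    _, length, channel = input_tensor.shape
    gap = tf.reduce_mean(input_tensor, axis=1, keepdims=True)   # (batch, 1, channel)
    gmp = tf.reduce_max(input_tensor, axis=1, keepdims=True)    # (batch, 1, channel)
    conv = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')
    weights = tf.nn.sigmoid(conv(gap) + conv(gmp))              # shared 1x1 conv on both descriptors
    return tf.multiply(input_tensor, weights)                   # broadcasts over the length axis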
|
||||||
|
|
||||||
|
|
||||||
|
# min-max normalization
|
||||||
|
def normalization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("归一化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# scale each column to [0, 1]
|
||||||
|
max = np.max(data, axis=0)
|
||||||
|
max = np.broadcast_to(max, [rows, cols])
|
||||||
|
min = np.min(data, axis=0)
|
||||||
|
min = np.broadcast_to(min, [rows, cols])
|
||||||
|
|
||||||
|
data = (data - min) / (max - min)
|
||||||
|
print("归一化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
# standardization (z-score); named Regularization in this script
|
||||||
|
def Regularization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("正则化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# standardize: subtract the column mean, divide by the column standard deviation
|
||||||
|
mean = np.mean(data, axis=0)
|
||||||
|
mean = np.broadcast_to(mean, shape=[rows, cols])
|
||||||
|
dst = np.sqrt(np.var(data, axis=0))
|
||||||
|
dst = np.broadcast_to(dst, shape=[rows, cols])
|
||||||
|
data = (data - mean) / dst
|
||||||
|
print("正则化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def EWMA(data, K=K, namuda=namuda):
|
||||||
|
# what t represents is still unclear; it is left at 0 for now
|
||||||
|
t = 0
|
||||||
|
mid = np.mean(data, axis=0)
|
||||||
|
standard = np.sqrt(np.var(data, axis=0))
|
||||||
|
UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
|
||||||
|
return mid, UCL, LCL
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def condition_monitoring_model():
|
||||||
|
input = tf.keras.Input(shape=[time_stamp, feature_num])
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
|
||||||
|
GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
|
||||||
|
d1 = tf.keras.layers.Dense(300)(GRU1)
|
||||||
|
output = tf.keras.layers.Dense(10)(d1)
|
||||||
|
|
||||||
|
model = tf.keras.Model(inputs=input, outputs=output)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
# train_data:(300455,120,10)
# train_label1:(300455,10)
# train_label2:(300455,)
|
||||||
|
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||||
|
train_label1,
|
||||||
|
train_label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=True,
|
||||||
|
random_state=100)
|
||||||
|
if is_split:
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||||
|
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||||
|
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||||
|
split_size: float = 0.2, shuffle: bool = True):
|
||||||
|
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||||
|
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||||
|
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||||
|
label1,
|
||||||
|
label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=shuffle,
|
||||||
|
random_state=100)
|
||||||
|
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# train_data:(300455,120,10)
# train_label1:(300455,10)
# train_label2:(300455,)
|
||||||
|
def train_step_one(train_data, train_label1, train_label2):
|
||||||
|
model = Joint_Monitoring()
|
||||||
|
# # # # TODO 需要运行编译一次,才能打印model.summary()
|
||||||
|
# model.build(input_shape=(batch_size, filter_num, dims))
|
||||||
|
# model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=True)
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
|
||||||
|
is_first_time=True)
|
||||||
|
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
print('Training loss is :', loss_value.numpy())
|
||||||
|
print('Validating loss is :', val_loss.numpy())
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
|
||||||
|
# step_two_model.summary()
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
history_accuracy = []
|
||||||
|
learning_rate = 1e-3
|
||||||
|
for epoch in range(EPOCH):
|
||||||
|
print()
|
||||||
|
print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
|
||||||
|
if epoch == 0:
|
||||||
|
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
|
||||||
|
train_label2,
|
||||||
|
is_split=True)
|
||||||
|
# print()
|
||||||
|
# print("EPOCH:", epoch, "/", EPOCH, ":")
|
||||||
|
# 用于让train知道,这是这个epoch中的第几次训练
|
||||||
|
z = 0
|
||||||
|
# 用于batch_size次再训练
|
||||||
|
k = 1
|
||||||
|
accuracy_num = 0
|
||||||
|
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
|
||||||
|
size, _, _ = train_data.shape
|
||||||
|
data_1 = tf.expand_dims(data_1, axis=0)
|
||||||
|
label_1 = tf.expand_dims(label_1, axis=0)
|
||||||
|
label_2 = tf.expand_dims(label_2, axis=0)
|
||||||
|
if batch_size != 1:
|
||||||
|
if k % batch_size == 1:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
else:
|
||||||
|
data = tf.concat([data, data_1], axis=0)
|
||||||
|
label1 = tf.concat([label1, label_1], axis=0)
|
||||||
|
label2 = tf.concat([label2, label_2], axis=0)
|
||||||
|
else:
|
||||||
|
data = data_1
|
||||||
|
label1 = label_1
|
||||||
|
label2 = label_2
|
||||||
|
|
||||||
|
if k % batch_size == 0:
|
||||||
|
# label = tf.expand_dims(label, axis=-1)
|
||||||
|
output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
|
||||||
|
loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
|
||||||
|
learning_rate=learning_rate,
|
||||||
|
is_first_time=False, pred_3=output1, pred_4=output2,
|
||||||
|
pred_5=output3)
|
||||||
|
accuracy_num += accuracy_value
|
||||||
|
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
|
||||||
|
accuracy_num / ((z + 1) * batch_size))
|
||||||
|
k = 0
|
||||||
|
z = z + 1
|
||||||
|
k = k + 1
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
|
||||||
|
val_label2=val_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
|
||||||
|
accuracy_value=val_accuracy)
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
history_loss.append(loss_value.numpy())
|
||||||
|
history_accuracy.append(val_accuracy)
|
||||||
|
print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
|
||||||
|
accuracy_num / ((z + 1) * batch_size)))
|
||||||
|
print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
|
||||||
|
if IsStopTraining(history_loss=history_val_loss, patience=7):
|
||||||
|
break
|
||||||
|
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
|
||||||
|
if learning_rate >= 1e-4:
|
||||||
|
learning_rate = learning_rate * 0.1
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
|
||||||
|
history_loss = []
|
||||||
|
history_val_loss = []
|
||||||
|
|
||||||
|
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
|
||||||
|
val_label2=test_label2,
|
||||||
|
is_first_time=False, step_one_model=step_one_model)
|
||||||
|
|
||||||
|
history_val_loss.append(val_loss)
|
||||||
|
print("val_accuracy:", val_accuracy)
|
||||||
|
print("val_loss:", val_loss)
|
||||||
|
|
||||||
|
|
||||||
|
def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False):
|
||||||
|
# 获取模型的所有参数的个数
|
||||||
|
# step_two_model.count_params()
|
||||||
|
total_result = []
|
||||||
|
size, length, dims = test_data.shape
|
||||||
|
for epoch in range(0, size - batch_size + 1, batch_size):
|
||||||
|
each_test_data = test_data[epoch:epoch + batch_size, :, :]
|
||||||
|
_, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
|
||||||
|
total_result.append(output4)
|
||||||
|
total_result = np.reshape(total_result, [total_result.__len__(), -1])
|
||||||
|
total_result = np.reshape(total_result, [-1, ])
|
||||||
|
if isPlot:
|
||||||
|
plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
|
||||||
|
# 画出 y=1 这条水平线
|
||||||
|
plt.axhline(0.5, c='red', label='Failure threshold')
|
||||||
|
# 箭头指向上面的水平线
|
||||||
|
# plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
|
||||||
|
# alpha=0.9, overhang=0.5)
|
||||||
|
# plt.text(35000, 0.9, "Truth Fault", fontsize=10, color='black', verticalalignment='top')
|
||||||
|
plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
|
||||||
|
plt.xlabel("time")
|
||||||
|
plt.ylabel("confience")
|
||||||
|
plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10})
|
||||||
|
plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
|
||||||
|
horizontalalignment='center',
|
||||||
|
bbox={'facecolor': 'grey',
|
||||||
|
'pad': 10})
|
||||||
|
plt.grid()
|
||||||
|
# plt.ylim(0, 1)
|
||||||
|
# plt.xlim(-50, 1300)
|
||||||
|
# plt.legend("", loc='upper left')
|
||||||
|
plt.show()
|
||||||
|
return total_result
|
||||||
|
|
||||||
|
|
||||||
|
def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
|
||||||
|
predicted_data1 = []
|
||||||
|
predicted_data2 = []
|
||||||
|
predicted_data3 = []
|
||||||
|
size, length, dims = data.shape
|
||||||
|
for epoch in range(0, size, batch_size):
|
||||||
|
each_test_data = data[epoch:epoch + batch_size, :, :]
|
||||||
|
output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
|
||||||
|
if epoch == 0:
|
||||||
|
predicted_data1 = output1
|
||||||
|
predicted_data2 = output2
|
||||||
|
predicted_data3 = output3
|
||||||
|
else:
|
||||||
|
predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
|
||||||
|
predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
|
||||||
|
predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
|
||||||
|
|
||||||
|
predicted_data1 = np.reshape(predicted_data1, [-1, 10])
|
||||||
|
predicted_data2 = np.reshape(predicted_data2, [-1, 10])
|
||||||
|
predicted_data3 = np.reshape(predicted_data3, [-1, 10])
|
||||||
|
predict_data = 0
|
||||||
|
|
||||||
|
predict_data = predicted_data1
|
||||||
|
mseList = []
|
||||||
|
meanList = []
|
||||||
|
maxList = []
|
||||||
|
|
||||||
|
for i in range(1, 4):
|
||||||
|
print("i:", i)
|
||||||
|
if i == 1:
|
||||||
|
predict_data = predicted_data1
|
||||||
|
elif i == 2:
|
||||||
|
predict_data = predicted_data2
|
||||||
|
elif i == 3:
|
||||||
|
predict_data = predicted_data3
|
||||||
|
temp = np.abs(predict_data - label)
|
||||||
|
temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
|
||||||
|
temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
|
||||||
|
temp3 = temp1 / temp2
|
||||||
|
mse = np.sum((temp1 / temp2) ** 2, axis=1)
|
||||||
|
|
||||||
|
print("mse.shape:", mse.shape)
|
||||||
|
# mse=np.mean((predicted_data-label)**2,axis=1)
|
||||||
|
# print("mse", mse)
|
||||||
|
mseList.append(mse)
|
||||||
|
if isStandard:
|
||||||
|
dims, = mse.shape
|
||||||
|
mean = np.mean(mse)
|
||||||
|
std = np.sqrt(np.var(mse))
|
||||||
|
max = mean + 3 * std
|
||||||
|
print("max.shape:", max.shape)
|
||||||
|
# min = mean-3*std
|
||||||
|
max = np.broadcast_to(max, shape=[dims, ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
mean = np.broadcast_to(mean, shape=[dims, ])
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(max)
|
||||||
|
plt.plot(mse)
|
||||||
|
plt.plot(mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
maxList.append(max)
|
||||||
|
meanList.append(mean)
|
||||||
|
else:
|
||||||
|
if isPlot:
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(mse)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
return mseList, meanList, maxList
|
||||||
|
# pass
|
||||||
|
|
||||||
|
|
||||||
|
# healthy_data是健康数据,用于确定阈值,all_data是完整的数据,用于模型出结果
|
||||||
|
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
|
||||||
|
isSave: bool = False, predictI: int = 1):
|
||||||
|
# TODO 计算MSE确定阈值
|
||||||
|
# plt.ion()
|
||||||
|
mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
|
||||||
|
mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
|
||||||
|
|
||||||
|
for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
|
||||||
|
|
||||||
|
# 误报率的计算
|
||||||
|
total, = mse.shape
|
||||||
|
faultNum = 0
|
||||||
|
faultList = []
|
||||||
|
for i in range(total):
|
||||||
|
if (mse[i] > max[i]):
|
||||||
|
faultNum += 1
|
||||||
|
faultList.append(mse[i])
|
||||||
|
|
||||||
|
fault_rate = faultNum / total
|
||||||
|
print("误报率:", fault_rate)
|
||||||
|
|
||||||
|
# 漏报率计算
|
||||||
|
missNum = 0
|
||||||
|
missList = []
|
||||||
|
all, = mse1.shape
|
||||||
|
for i in range(all):
|
||||||
|
if (mse1[i] < max[0]):
|
||||||
|
missNum += 1
|
||||||
|
missList.append(mse1[i])
|
||||||
|
|
||||||
|
miss_rate = missNum / all
|
||||||
|
print("漏报率:", miss_rate)
|
||||||
|
|
||||||
|
# 总体图
|
||||||
|
print("mse:", mse)
|
||||||
|
print("mse1:", mse1)
|
||||||
|
print("============================================")
|
||||||
|
total_mse = np.concatenate([mse, mse1], axis=0)
|
||||||
|
total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
|
||||||
|
# min = np.broadcast_to(min,shape=[dims,])
|
||||||
|
total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
|
||||||
|
|
||||||
|
if isSave:
|
||||||
|
save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
|
||||||
|
|
||||||
|
np.savetxt(save_mse_name1,total_mse, delimiter=',')
|
||||||
|
np.savetxt(save_max_name1,total_max, delimiter=',')
|
||||||
|
|
||||||
|
|
||||||
|
plt.figure(random.randint(1, 100))
|
||||||
|
plt.plot(total_max)
|
||||||
|
plt.plot(total_mse)
|
||||||
|
plt.plot(total_mean)
|
||||||
|
# plt.plot(min)
|
||||||
|
plt.show()
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
total_data = loadData.execute(N=feature_num, file_name=file_name)
|
||||||
|
total_data = normalization(data=total_data)
|
||||||
|
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
|
||||||
|
total_data[:healthy_date, :], is_Healthy=True)
|
||||||
|
train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
|
||||||
|
total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
|
||||||
|
is_Healthy=False)
|
||||||
|
#### TODO step-one training
|
||||||
|
# single-run test
|
||||||
|
# train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
|
||||||
|
train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
|
||||||
|
|
||||||
|
|
||||||
|
# load the already trained step-one model: one copy keeps training, the other only produces outputs
|
||||||
|
step_one_model = Joint_Monitoring()
|
||||||
|
step_one_model.load_weights(save_name)
|
||||||
|
#
|
||||||
|
# step_two_model = Joint_Monitoring()
|
||||||
|
# step_two_model.load_weights(save_name)
|
||||||
|
|
||||||
|
healthy_size, _, _ = train_data_healthy.shape
|
||||||
|
unhealthy_size, _, _ = train_data_unhealthy.shape
|
||||||
|
all_data, _, _ = get_training_data_overlapping(
|
||||||
|
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
|
||||||
|
## single-run test of result generation
|
||||||
|
# getResult(step_one_model,
|
||||||
|
# healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
|
||||||
|
# :],
|
||||||
|
# healthy_label=train_label1_healthy[
|
||||||
|
# healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
|
||||||
|
# unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
|
||||||
|
|
||||||
|
getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||||
|
healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
|
||||||
|
unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy,isSave=True)
|
||||||
|
|
||||||
|
### TODO show the full results
|
||||||
|
# all_data, _, _ = get_training_data_overlapping(
|
||||||
|
# total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
|
||||||
|
# all_data = np.concatenate([])
|
||||||
|
# single-run test
|
||||||
|
# showResult(step_two_model, test_data=all_data[:32], isPlot=True)
|
||||||
|
# showResult(step_two_model, test_data=all_data, isPlot=True)
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
@ -0,0 +1,310 @@
|
||||||
|
import tensorflow as tf
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
from sklearn.manifold import TSNE
|
||||||
|
from sklearn.metrics import confusion_matrix
|
||||||
|
import seaborn as sns
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from keras.callbacks import EarlyStopping
|
||||||
|
|
||||||
|
'''Hyperparameter settings'''
|
||||||
|
time_stamp = 120
|
||||||
|
feature_num = 10
|
||||||
|
batch_size = 16
|
||||||
|
learning_rate = 0.001
|
||||||
|
EPOCH = 101
|
||||||
|
model_name = "ResNet"
|
||||||
|
'''EWMA hyperparameters'''
|
||||||
|
K = 18
|
||||||
|
namuda = 0.01
|
||||||
|
'''Save names'''
|
||||||
|
|
||||||
|
save_name = "./model//{0}.h5".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
|
||||||
|
time_stamp,
|
||||||
|
feature_num,
|
||||||
|
batch_size,
|
||||||
|
EPOCH)
|
||||||
|
|
||||||
|
'''File name'''
|
||||||
|
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
|
||||||
|
|
||||||
|
'''
|
||||||
|
File notes: jb4q_8_delete_total_zero.csv is the file with only the all-zero columns removed.
Rows 0:415548 are all normal values (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
Rows 415549:432153 are all abnormal values (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
|
||||||
|
'''
|
||||||
|
'''File parameters'''
|
||||||
|
# last normal time point (row index)
|
||||||
|
healthy_date = 415548
|
||||||
|
# last abnormal time point (row index)
|
||||||
|
unhealthy_date = 432153
|
||||||
|
# abnormality tolerance (rows)
|
||||||
|
unhealthy_patience = 5
|
||||||
|
|
||||||
|
# plotting settings
|
||||||
|
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 10}  # font family and size for axis labels
|
||||||
|
|
||||||
|
|
||||||
|
# train_data = np.load("../../../data/train_data.npy")
|
||||||
|
# train_label = np.load("../../../data/train_label.npy")
|
||||||
|
# test_data = np.load("../../../data/test_data.npy")
|
||||||
|
# test_label = np.load("../../../data/test_label.npy")
|
||||||
|
|
||||||
|
|
||||||
|
# CIFAR_100_data = tf.keras.datasets.cifar100
|
||||||
|
# (train_data, train_label), (test_data, test_label) = CIFAR_100_data.load_data()
|
||||||
|
# train_data=np.array(train_data)
|
||||||
|
# train_label=np.array(train_label)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label.shape)
|
||||||
|
# print(train_data)
|
||||||
|
# print(test_data)
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# overlapping (sliding-window) sampling
|
||||||
|
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
|
||||||
|
rows, cols = data.shape
|
||||||
|
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
|
||||||
|
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
|
||||||
|
for i in range(rows):
|
||||||
|
if i + time_stamp >= rows:
|
||||||
|
break
|
||||||
|
if i + time_stamp < rows - 1:
|
||||||
|
train_data[i] = data[i:i + time_stamp]
|
||||||
|
train_label[i] = data[i + time_stamp]
|
||||||
|
|
||||||
|
print("重叠采样以后:")
|
||||||
|
print("data:", train_data) # (300334,120,10)
|
||||||
|
print("label:", train_label) # (300334,10)
|
||||||
|
|
||||||
|
if is_Healthy:
|
||||||
|
train_label2 = np.ones(shape=[train_label.shape[0]])
|
||||||
|
else:
|
||||||
|
train_label2 = np.zeros(shape=[train_label.shape[0]])
|
||||||
|
|
||||||
|
print("label2:", train_label2)
|
||||||
|
|
||||||
|
return train_data, train_label, train_label2
|
||||||
|
|
||||||
|
|
||||||
|
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
|
||||||
|
train_label1,
|
||||||
|
train_label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=True,
|
||||||
|
random_state=100)
|
||||||
|
if is_split:
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
train_data = np.concatenate([train_data, test_data], axis=0)
|
||||||
|
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
|
||||||
|
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
|
||||||
|
split_size: float = 0.2, shuffle: bool = True):
|
||||||
|
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
|
||||||
|
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
|
||||||
|
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
|
||||||
|
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
|
||||||
|
label1,
|
||||||
|
label2,
|
||||||
|
test_size=split_size,
|
||||||
|
shuffle=shuffle,
|
||||||
|
random_state=100)
|
||||||
|
|
||||||
|
# print(train_data.shape)
|
||||||
|
# print(train_label1.shape)
|
||||||
|
# print(train_label2.shape)
|
||||||
|
# print(train_data.shape)
|
||||||
|
|
||||||
|
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
|
||||||
|
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
def normalization(data):
|
||||||
|
rows, cols = data.shape
|
||||||
|
print("归一化之前:", data)
|
||||||
|
print(data.shape)
|
||||||
|
print("======================")
|
||||||
|
|
||||||
|
# 归一化
|
||||||
|
max = np.max(data, axis=0)
|
||||||
|
max = np.broadcast_to(max, [rows, cols])
|
||||||
|
min = np.min(data, axis=0)
|
||||||
|
min = np.broadcast_to(min, [rows, cols])
|
||||||
|
|
||||||
|
data = (data - min) / (max - min)
|
||||||
|
print("归一化之后:", data)
|
||||||
|
print(data.shape)
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
def identity_block(input_tensor, out_dim):
|
||||||
|
con1 = tf.keras.layers.Conv1D(filters=out_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
|
||||||
|
input_tensor)
|
||||||
|
bhn1 = tf.keras.layers.BatchNormalization()(con1)
|
||||||
|
|
||||||
|
# con2 = tf.keras.layers.Conv1D(filters=out_dim // 4, kernel_size=3, padding='SAME', activation=tf.nn.relu)(bhn1)
|
||||||
|
# bhn2 = tf.keras.layers.BatchNormalization()(con2)
|
||||||
|
|
||||||
|
con3 = tf.keras.layers.Conv1D(filters=out_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(bhn1)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([input_tensor, con3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
def resnet_Model():
|
||||||
|
inputs = tf.keras.Input(shape=[120, 10])
|
||||||
|
conv1 = tf.keras.layers.Conv1D(filters=20, kernel_size=3, padding='SAME', activation=tf.nn.relu)(inputs)
|
||||||
|
'''Layer 1'''
|
||||||
|
output_dim = 10
|
||||||
|
identity_1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
|
||||||
|
conv1)
|
||||||
|
identity_1 = tf.keras.layers.BatchNormalization()(identity_1)
|
||||||
|
for _ in range(2):
|
||||||
|
identity_1 = identity_block(identity_1, output_dim)
|
||||||
|
'''Layer 2'''
|
||||||
|
output_dim = 20
|
||||||
|
identity_2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
|
||||||
|
identity_1)
|
||||||
|
identity_2 = tf.keras.layers.BatchNormalization()(identity_2)
|
||||||
|
for _ in range(2):
|
||||||
|
identity_2 = identity_block(identity_2, output_dim)
|
||||||
|
'''第三层'''
|
||||||
|
output_dim = 20
|
||||||
|
identity_3 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
|
||||||
|
identity_2)
|
||||||
|
identity_3 = tf.keras.layers.BatchNormalization()(identity_3)
|
||||||
|
for _ in range(2):
|
||||||
|
identity_3 = identity_block(identity_3, output_dim)
|
||||||
|
'''第四层'''
|
||||||
|
output_dim = 40
|
||||||
|
identity_4 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
|
||||||
|
identity_3)
|
||||||
|
identity_4 = tf.keras.layers.BatchNormalization()(identity_4)
|
||||||
|
for _ in range(2):
|
||||||
|
identity_4 = identity_block(identity_4, output_dim)
|
||||||
|
flatten = tf.keras.layers.GlobalAvgPool1D()(identity_4)
|
||||||
|
dropout = tf.keras.layers.Dropout(0.217)(flatten)
|
||||||
|
|
||||||
|
dense = tf.keras.layers.Dense(128, activation=tf.nn.relu)(dropout)
|
||||||
|
dense = tf.keras.layers.BatchNormalization(name="bn_last")(dense)
|
||||||
|
dense = tf.keras.layers.Dense(2, activation=tf.nn.sigmoid)(dense)
|
||||||
|
model = tf.keras.Model(inputs=inputs, outputs=dense)
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # # Load data
    #
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)
    train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
        total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
        is_Healthy=False)

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ],
        unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)

    train_label = train_label2
    test_label = test_label2

    model = resnet_Model()
    model.compile(optimizer=tf.optimizers.Adam(), loss=tf.losses.binary_crossentropy,
                  metrics=['acc'])
    model.summary()
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5, mode='min', verbose=1)

    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath=save_name,
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)

    history = model.fit(train_data, train_label, epochs=20, batch_size=32, validation_data=(test_data, test_label),
                        callbacks=[checkpoint, early_stop])
    model.save("./model/ResNet.h5")
    model = tf.keras.models.load_model("../model/ResNet_model.h5")

    # Show the results
    trained_data = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer('bn_last').output).predict(
        train_data)
    predict_label = model.predict(test_data)
    predict_label_max = np.argmax(predict_label, axis=1)
    predict_label = np.expand_dims(predict_label_max, axis=1)

    # use a separate variable name so sklearn's confusion_matrix function is not shadowed
    conf_mat = confusion_matrix(test_label, predict_label)

    tsne = TSNE(n_components=3, verbose=2, perplexity=30, n_iter=5000).fit_transform(trained_data)
    print("tsne[:,0]", tsne[:, 0])
    print("tsne[:,1]", tsne[:, 1])
    print("tsne[:,2]", tsne[:, 2])
    x, y, z = tsne[:, 0], tsne[:, 1], tsne[:, 2]
    x = (x - np.min(x)) / (np.max(x) - np.min(x))
    y = (y - np.min(y)) / (np.max(y) - np.min(y))
    z = (z - np.min(z)) / (np.max(z) - np.min(z))

    fig1 = plt.figure()
    ax1 = fig1.add_subplot(projection='3d')
    ax1.scatter3D(x, y, z, c=train_label, cmap=plt.cm.get_cmap("jet", 10))

    fig2 = plt.figure()
    ax2 = fig2.add_subplot()
    sns.heatmap(conf_mat, annot=True, fmt="d", cmap='Blues')
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')

    # fig3 = plt.figure()
    # ax3 = fig3.add_subplot()
    # plt.plot(history.epoch, history.history.get('acc'), label='acc')
    # plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc')
    #
    # fig4 = plt.figure()
    # ax4 = fig3.add_subplot()
    # plt.plot(history.epoch, history.history.get('loss'), label='loss')
    # plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss')
    plt.legend()
    plt.show()

    score = model.evaluate(test_data, test_label)
    print('score:', score)

@ -4,7 +4,12 @@
'''
@Author : dingjiawen
@Date   : 2022/10/11 18:53  ->  2022/10/19 14:34
@Usage  :
@Desc   :  ->  SVM
'''


import sklearn.svm as svm

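# Illustrative sketch only (not part of the original commit): the rest of this
# SVM file is not shown in the hunk, but with the windowed arrays prepared by
# the training script above (names assumed), a baseline fit could look like:
clf = svm.SVC(kernel='rbf', C=1.0)
clf.fit(train_data.reshape(len(train_data), -1), train_label2)
print('SVM accuracy:', clf.score(test_data.reshape(len(test_data), -1), test_label2))
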
@ -0,0 +1,129 @@
# _*_ coding: UTF-8 _*_


'''
@Author : dingjiawen
@Date   : 2022/7/12 17:48
@Usage  : Classic SE channel attention
@Desc   :
'''

import tensorflow as tf
import tensorflow.keras
from tensorflow.keras import *
import tensorflow.keras.layers as layers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D

import keras.backend as K


class Between_0_1(tf.keras.constraints.Constraint):

    def __call__(self, w):
        # clip the weights into [0, 1]; a Constraint only needs __call__
        return K.clip(w, 0, 1)


class SEChannelAttention(layers.Layer):

    def __init__(self):
        # call the parent class __init__()
        super(SEChannelAttention, self).__init__()
        self.DWC = DepthwiseConv1D(kernel_size=1, padding='SAME')
        # self.DWC = DepthwiseConv1D(kernel_size=1, padding='causal', dilation_rate=4, data_format='channels_last')

    def build(self, input_shape):
        if len(input_shape) != 3:
            raise ValueError('Inputs to `SEChannelAttention` should have rank 3. '
                             'Received input shape:', str(input_shape))

        print(input_shape)
        # GAP branch
        self.GAP = tf.keras.layers.GlobalAvgPool1D()
        self.c1 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME')
        # s1 = tf.nn.sigmoid(c1)

        # GMP branch
        self.GMP = tf.keras.layers.GlobalMaxPool1D()
        self.c2 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME')
        # s2 = tf.nn.sigmoid(c2)

        # learnable per-channel weight
        self.weight_kernel = self.add_weight(
            shape=(1, input_shape[2]),
            initializer='glorot_uniform',
            name='weight_kernel')

    def call(self, inputs, **kwargs):
        batch_size, length, channel = inputs.shape
        DWC1 = self.DWC(inputs)

        # GAP branch
        GAP = self.GAP(DWC1)
        GAP = tf.expand_dims(GAP, axis=1)
        c1 = self.c1(GAP)
        # c1 = tf.keras.layers.BatchNormalization()(c1)
        # s1 = tf.nn.sigmoid(c1)

        # # GMP branch
        # GMP = self.GMP(DWC1)
        # GMP = tf.expand_dims(GMP, axis=1)
        # c2 = self.c2(GMP)
        # c2 = tf.keras.layers.BatchNormalization()(c2)
        # s2 = tf.nn.sigmoid(c2)

        # print(self.weight_kernel)

        weight_kernel = tf.broadcast_to(self.weight_kernel, shape=[length, channel])
        weight_kernel = tf.broadcast_to(weight_kernel, shape=[batch_size, length, channel])
        s1 = tf.broadcast_to(c1, shape=[batch_size, length, channel])
        # s2 = tf.broadcast_to(s2, shape=[batch_size, length, channel])

        output = weight_kernel * s1 * inputs
        return output


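# Usage sketch (illustrative only, not part of the original commit): the layer
# builds a per-channel gate from depthwise conv + GAP + 1x1 conv and multiplies
# it, together with the learned weight_kernel, back onto the inputs, so the
# output keeps the input shape. Assumes the repo's DepthwiseConv1D import
# resolves and a (batch, length, channels) tensor:
if __name__ == '__main__':
    _x = tf.random.normal([4, 120, 10])
    _se = SEChannelAttention()
    print(_se(_x).shape)  # expected: (4, 120, 10)

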
class DynamicPooling(layers.Layer):

    def __init__(self, pool_size=2):
        # call the parent class __init__()
        super(DynamicPooling, self).__init__()
        self.pool_size = pool_size
        pass

    def build(self, input_shape):
        if len(input_shape) != 3:
            raise ValueError('Inputs to `DynamicPooling` should have rank 3. '
                             'Received input shape:', str(input_shape))
        # average-pooling branch
        self.AP = tf.keras.layers.AveragePooling1D(pool_size=self.pool_size)

        # max-pooling branch
        self.MP = tf.keras.layers.MaxPool1D(pool_size=self.pool_size)

        # learnable blending weight, constrained to [0, 1]
        self.weight_kernel = self.add_weight(
            shape=(int(input_shape[1] / self.pool_size), input_shape[2]),
            initializer='glorot_uniform',
            name='weight_kernel',
            constraint=Between_0_1())

    def call(self, inputs, **kwargs):
        batch_size, length, channel = inputs.shape

        # average-pooled branch
        GAP = self.AP(inputs)

        # max-pooled branch
        GMP = self.MP(inputs)

        weight_kernel = tf.broadcast_to(self.weight_kernel, shape=GMP.shape)

        # learned element-wise blend of average pooling and max pooling
        output = tf.add(weight_kernel * GAP, (tf.ones_like(weight_kernel) - weight_kernel) * GMP)
        return output

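# Usage sketch (illustrative only, not part of the original commit): the layer
# halves the time dimension like ordinary pooling, but blends average and max
# pooling with a learned [0, 1] weight per output position and channel.
if __name__ == '__main__':
    _p = DynamicPooling(pool_size=2)
    print(_p(tf.random.normal([4, 120, 10])).shape)  # expected: (4, 60, 10)
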
@ -0,0 +1,457 @@
|
||||||
|
# _*_ coding: UTF-8 _*_
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/7/14 9:40
|
||||||
|
@Usage : 联合监测模型
|
||||||
|
@Desc : RNet:DCAU只分类,不预测
|
||||||
|
'''
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras as keras
|
||||||
|
from tensorflow.keras import *
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss
|
||||||
|
|
||||||
|
|
||||||
|
class Joint_Monitoring(keras.Model):
|
||||||
|
|
||||||
|
def __init__(self, conv_filter=20):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(Joint_Monitoring, self).__init__()
|
||||||
|
# step one
|
||||||
|
self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample1 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU2 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample2 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU3 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p1 = DynamicPooling(pool_size=2)
|
||||||
|
self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.DACU4 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p2 = DynamicPooling(pool_size=4)
|
||||||
|
self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p3 = DynamicPooling(pool_size=2)
|
||||||
|
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
# tf.nn.softmax
|
||||||
|
self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
|
||||||
|
|
||||||
|
|
||||||
|
# loss
|
||||||
|
self.train_loss = []
|
||||||
|
|
||||||
|
def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
output1 = []
|
||||||
|
output2 = []
|
||||||
|
output3 = []
|
||||||
|
output4 = []
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
else:
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
# concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
concat3 = output1
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
return output1, output2, output3, output4
|
||||||
|
|
||||||
|
def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
|
||||||
|
MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
|
||||||
|
MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
|
||||||
|
# MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
|
||||||
|
# MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
|
||||||
|
# MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
|
||||||
|
|
||||||
|
print("MSE_loss1:", MSE_loss1.numpy())
|
||||||
|
print("MSE_loss2:", MSE_loss2.numpy())
|
||||||
|
print("MSE_loss3:", MSE_loss3.numpy())
|
||||||
|
loss = MSE_loss1 + MSE_loss2 + MSE_loss3
|
||||||
|
Accuracy_num = 0
|
||||||
|
|
||||||
|
else:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
# concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
concat3 = output1
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss = 0
|
||||||
|
# MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
|
||||||
|
Cross_Entropy_loss = tf.reduce_mean(
|
||||||
|
tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=False))  # output4 is already sigmoid-activated, not a logit
|
||||||
|
|
||||||
|
print("MSE_loss:", MSE_loss)
|
||||||
|
print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
|
||||||
|
Accuracy_num = self.get_Accuracy(label=label2, output=output4)
|
||||||
|
loss = Cross_Entropy_loss
|
||||||
|
return loss, Accuracy_num
|
||||||
|
|
||||||
|
def get_Accuracy(self, output, label):

    predict_label = tf.round(output)
    label = tf.cast(label, dtype=tf.float32)

    t = np.array(label - predict_label)

    # number of samples whose rounded prediction equals the label
    b = t[t[:] == 0]

    return len(b)
|
||||||
|
|
||||||
|
def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
with tf.GradientTape() as tape:
|
||||||
|
# todo 原本tape只会监控由tf.Variable创建的trainable=True属性
|
||||||
|
# tape.watch(self.variables)
|
||||||
|
L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
# 保存一下loss,用于输出
|
||||||
|
self.train_loss = L
|
||||||
|
g = tape.gradient(L, self.variables)
|
||||||
|
return g, Accuracy_num
|
||||||
|
|
||||||
|
def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
|
||||||
|
pred_4=None, pred_5=None):
|
||||||
|
g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
|
||||||
|
return self.train_loss, Accuracy_num
|
||||||
|
|
||||||
|
# 暂时只支持batch_size等于1,不然要传z比较麻烦
|
||||||
|
def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
|
||||||
|
step_one_model=None):
|
||||||
|
val_loss = []
|
||||||
|
accuracy_num = 0
|
||||||
|
output1 = 0
|
||||||
|
output2 = 0
|
||||||
|
output3 = 0
|
||||||
|
z = 1
|
||||||
|
size, length, dims = val_data.shape
|
||||||
|
if batch_size is None:
|
||||||
|
batch_size = self.batch_size
|
||||||
|
for epoch in range(0, size - batch_size, batch_size):
|
||||||
|
each_val_data = val_data[epoch:epoch + batch_size, :, :]
|
||||||
|
each_val_label1 = val_label1[epoch:epoch + batch_size, :]
|
||||||
|
each_val_label2 = val_label2[epoch:epoch + batch_size, ]
|
||||||
|
# each_val_data = tf.expand_dims(each_val_data, axis=0)
|
||||||
|
# each_val_query = tf.expand_dims(each_val_query, axis=0)
|
||||||
|
# each_val_label = tf.expand_dims(each_val_label, axis=0)
|
||||||
|
if not is_first_time:
|
||||||
|
output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)
|
||||||
|
|
||||||
|
each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
|
||||||
|
is_first_time=is_first_time,
|
||||||
|
pred_3=output1, pred_4=output2, pred_5=output3)
|
||||||
|
accuracy_num += each_accuracy_num
|
||||||
|
val_loss.append(each_loss)
|
||||||
|
z += 1
|
||||||
|
|
||||||
|
val_accuracy = accuracy_num / ((z-1) * batch_size)
|
||||||
|
val_total_loss = tf.reduce_mean(val_loss)
|
||||||
|
return val_total_loss, val_accuracy
|
||||||
|
|
||||||
|
|
||||||
|
class RevConv(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConv, self).__init__()
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConv, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def build(self, input_shape):
|
||||||
|
# print(input_shape)
|
||||||
|
_, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
|
||||||
|
padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
# self.b2 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# self.b3 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
# out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
conv1 = self.conv1(inputs)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
b1 = tf.nn.leaky_relu(b1)
|
||||||
|
# b1 = self.b1
|
||||||
|
|
||||||
|
conv2 = self.conv2(inputs)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
b2 = tf.nn.leaky_relu(b2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(inputs)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
class RevConvBlock(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, num: int = 3, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConvBlock, self).__init__()
|
||||||
|
self.num = num
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
self.L = []
|
||||||
|
for i in range(num):
|
||||||
|
RepVGG = RevConv(kernel_size=kernel_size)
|
||||||
|
self.L.append(RepVGG)
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size,
|
||||||
|
'num': self.num
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConvBlock, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
for i in range(self.num):
|
||||||
|
inputs = self.L[i](inputs)
|
||||||
|
return inputs
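

# Training-loop sketch (illustrative only, not part of the original commit):
# Joint_Monitoring exposes a custom train()/get_val_loss() pair plus an
# is_first_time switch, which suggests a two-stage schedule: first fit the
# three GRU reconstruction heads (is_first_time=True, label1 = target signal),
# then fit the binary classifier head (is_first_time=False, label2 = health
# label). The array names train_data/train_label1/train_label2/test_* are
# assumed here and are not defined in this file.
if __name__ == '__main__':
    model = Joint_Monitoring()
    batch_size = 16
    for first in (True, False):
        for epoch in range(10):
            for i in range(0, train_data.shape[0] - batch_size, batch_size):
                loss, acc_num = model.train(train_data[i:i + batch_size],
                                            label1=train_label1[i:i + batch_size],
                                            label2=train_label2[i:i + batch_size],
                                            is_first_time=first)
            val_loss, val_acc = model.get_val_loss(test_data, test_label1, test_label2,
                                                   batch_size=batch_size, is_first_time=first,
                                                   step_one_model=model)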
@ -0,0 +1,455 @@
|
||||||
|
# _*_ coding: UTF-8 _*_
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/7/14 9:40
|
||||||
|
@Usage : 联合监测模型
|
||||||
|
@Desc : RNet:DCAU只分类,不预测
|
||||||
|
'''
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras as keras
|
||||||
|
from tensorflow.keras import *
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss
|
||||||
|
|
||||||
|
|
||||||
|
class Joint_Monitoring(keras.Model):
|
||||||
|
|
||||||
|
def __init__(self, conv_filter=20):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(Joint_Monitoring, self).__init__()
|
||||||
|
# step one
|
||||||
|
self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample1 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU2 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample2 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU3 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p1 = DynamicPooling(pool_size=2)
|
||||||
|
self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.DACU4 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p2 = DynamicPooling(pool_size=4)
|
||||||
|
self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p3 = DynamicPooling(pool_size=2)
|
||||||
|
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
# tf.nn.softmax
|
||||||
|
self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
|
||||||
|
|
||||||
|
|
||||||
|
# loss
|
||||||
|
self.train_loss = []
|
||||||
|
|
||||||
|
def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
output1 = []
|
||||||
|
output2 = []
|
||||||
|
output3 = []
|
||||||
|
output4 = []
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
else:
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
concat3 = tf.concat([output1, output2], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
return output1, output2, output3, output4
|
||||||
|
|
||||||
|
def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
|
||||||
|
MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
|
||||||
|
MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
|
||||||
|
# MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
|
||||||
|
# MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
|
||||||
|
# MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
|
||||||
|
|
||||||
|
print("MSE_loss1:", MSE_loss1.numpy())
|
||||||
|
print("MSE_loss2:", MSE_loss2.numpy())
|
||||||
|
print("MSE_loss3:", MSE_loss3.numpy())
|
||||||
|
loss = MSE_loss1 + MSE_loss2 + MSE_loss3
|
||||||
|
Accuracy_num = 0
|
||||||
|
|
||||||
|
else:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
concat3 = tf.concat([output1, output2], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss = 0
|
||||||
|
# MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
|
||||||
|
Cross_Entropy_loss = tf.reduce_mean(
|
||||||
|
tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))
|
||||||
|
|
||||||
|
print("MSE_loss:", MSE_loss)
|
||||||
|
print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
|
||||||
|
Accuracy_num = self.get_Accuracy(label=label2, output=output4)
|
||||||
|
loss = Cross_Entropy_loss
|
||||||
|
return loss, Accuracy_num
|
||||||
|
|
||||||
|
def get_Accuracy(self, output, label):
|
||||||
|
|
||||||
|
predict_label = tf.round(output)
|
||||||
|
label = tf.cast(label, dtype=tf.float32)
|
||||||
|
|
||||||
|
t = np.array(label - predict_label)
|
||||||
|
|
||||||
|
b = t[t[:] == 0]
|
||||||
|
|
||||||
|
return b.__len__()
|
||||||
|
|
||||||
|
def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
with tf.GradientTape() as tape:
|
||||||
|
# todo 原本tape只会监控由tf.Variable创建的trainable=True属性
|
||||||
|
# tape.watch(self.variables)
|
||||||
|
L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
# 保存一下loss,用于输出
|
||||||
|
self.train_loss = L
|
||||||
|
g = tape.gradient(L, self.variables)
|
||||||
|
return g, Accuracy_num
|
||||||
|
|
||||||
|
def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
|
||||||
|
pred_4=None, pred_5=None):
|
||||||
|
g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
|
||||||
|
return self.train_loss, Accuracy_num
|
||||||
|
|
||||||
|
# 暂时只支持batch_size等于1,不然要传z比较麻烦
|
||||||
|
def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
|
||||||
|
step_one_model=None):
|
||||||
|
val_loss = []
|
||||||
|
accuracy_num = 0
|
||||||
|
output1 = 0
|
||||||
|
output2 = 0
|
||||||
|
output3 = 0
|
||||||
|
z = 1
|
||||||
|
size, length, dims = val_data.shape
|
||||||
|
if batch_size == None:
|
||||||
|
batch_size = self.batch_size
|
||||||
|
for epoch in range(0, size - batch_size, batch_size):
|
||||||
|
each_val_data = val_data[epoch:epoch + batch_size, :, :]
|
||||||
|
each_val_label1 = val_label1[epoch:epoch + batch_size, :]
|
||||||
|
each_val_label2 = val_label2[epoch:epoch + batch_size, ]
|
||||||
|
# each_val_data = tf.expand_dims(each_val_data, axis=0)
|
||||||
|
# each_val_query = tf.expand_dims(each_val_query, axis=0)
|
||||||
|
# each_val_label = tf.expand_dims(each_val_label, axis=0)
|
||||||
|
if not is_first_time:
|
||||||
|
output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)
|
||||||
|
|
||||||
|
each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
|
||||||
|
is_first_time=is_first_time,
|
||||||
|
pred_3=output1, pred_4=output2, pred_5=output3)
|
||||||
|
accuracy_num += each_accuracy_num
|
||||||
|
val_loss.append(each_loss)
|
||||||
|
z += 1
|
||||||
|
|
||||||
|
val_accuracy = accuracy_num / ((z-1) * batch_size)
|
||||||
|
val_total_loss = tf.reduce_mean(val_loss)
|
||||||
|
return val_total_loss, val_accuracy
|
||||||
|
|
||||||
|
|
||||||
|
class RevConv(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConv, self).__init__()
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConv, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def build(self, input_shape):
|
||||||
|
# print(input_shape)
|
||||||
|
_, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
|
||||||
|
padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
# self.b2 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# self.b3 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
# out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
conv1 = self.conv1(inputs)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
b1 = tf.nn.leaky_relu(b1)
|
||||||
|
# b1 = self.b1
|
||||||
|
|
||||||
|
conv2 = self.conv2(inputs)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
b2 = tf.nn.leaky_relu(b2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(inputs)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
class RevConvBlock(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, num: int = 3, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConvBlock, self).__init__()
|
||||||
|
self.num = num
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
self.L = []
|
||||||
|
for i in range(num):
|
||||||
|
RepVGG = RevConv(kernel_size=kernel_size)
|
||||||
|
self.L.append(RepVGG)
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size,
|
||||||
|
'num': self.num
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConvBlock, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
for i in range(self.num):
|
||||||
|
inputs = self.L[i](inputs)
|
||||||
|
return inputs
|
||||||
|
|
@ -0,0 +1,455 @@
|
||||||
|
# _*_ coding: UTF-8 _*_
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/7/14 9:40
|
||||||
|
@Usage : 联合监测模型
|
||||||
|
@Desc : RNet:DCAU只分类,不预测
|
||||||
|
'''
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras as keras
|
||||||
|
from tensorflow.keras import *
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss
|
||||||
|
|
||||||
|
|
||||||
|
class Joint_Monitoring(keras.Model):
|
||||||
|
|
||||||
|
def __init__(self, conv_filter=20):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(Joint_Monitoring, self).__init__()
|
||||||
|
# step one
|
||||||
|
self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample1 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU2 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample2 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU3 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p1 = DynamicPooling(pool_size=2)
|
||||||
|
self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.DACU4 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p2 = DynamicPooling(pool_size=4)
|
||||||
|
self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p3 = DynamicPooling(pool_size=2)
|
||||||
|
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
# tf.nn.softmax
|
||||||
|
self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
|
||||||
|
|
||||||
|
|
||||||
|
# loss
|
||||||
|
self.train_loss = []
|
||||||
|
|
||||||
|
def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
output1 = []
|
||||||
|
output2 = []
|
||||||
|
output3 = []
|
||||||
|
output4 = []
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
else:
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
concat3 = tf.concat([output1, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
return output1, output2, output3, output4
|
||||||
|
|
||||||
|
    def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
                 pred_5=None):
        # step one
        RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
        RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
        conv1 = self.conv1(RepDCBlock1)
        conv1 = tf.nn.leaky_relu(conv1)
        conv1 = tf.keras.layers.BatchNormalization()(conv1)
        upsample1 = self.upsample1(conv1)

        DACU2 = self.DACU2(upsample1)
        DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
        RepDCBlock2 = self.RepDCBlock2(DACU2)
        RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
        conv2 = self.conv2(RepDCBlock2)
        conv2 = tf.nn.leaky_relu(conv2)
        conv2 = tf.keras.layers.BatchNormalization()(conv2)
        upsample2 = self.upsample2(conv2)

        DACU3 = self.DACU3(upsample2)
        DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
        RepDCBlock3 = self.RepDCBlock3(DACU3)
        RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
        conv3 = self.conv3(RepDCBlock3)
        conv3 = tf.nn.leaky_relu(conv3)
        conv3 = tf.keras.layers.BatchNormalization()(conv3)

        concat1 = tf.concat([conv2, conv3], axis=1)

        DACU4 = self.DACU4(concat1)
        DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
        RepDCBlock4 = self.RepDCBlock4(DACU4)
        RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
        conv4 = self.conv4(RepDCBlock4)
        conv4 = tf.nn.leaky_relu(conv4)
        conv4 = tf.keras.layers.BatchNormalization()(conv4)

        concat2 = tf.concat([conv1, conv4], axis=1)

        RepDCBlock5 = self.RepDCBlock5(concat2)
        RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)

        if is_first_time:
            # step two
            # reconstruct the original data
            # head on block3
            GRU1 = self.GRU1(RepDCBlock3)
            GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
            d1 = self.d1(GRU1)
            # tf.nn.softmax
            output1 = self.output1(d1)
            # head on block4
            GRU2 = self.GRU2(RepDCBlock4)
            GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
            d2 = self.d2(GRU2)
            # tf.nn.softmax
            output2 = self.output2(d2)
            # head on block5
            GRU3 = self.GRU3(RepDCBlock5)
            GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
            d3 = self.d3(GRU3)
            # tf.nn.softmax
            output3 = self.output3(d3)

            # reduce_mean averages over the reduced dimensions
            MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
            MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
            MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
            # MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
            # MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
            # MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))

            print("MSE_loss1:", MSE_loss1.numpy())
            print("MSE_loss2:", MSE_loss2.numpy())
            print("MSE_loss3:", MSE_loss3.numpy())
            loss = MSE_loss1 + MSE_loss2 + MSE_loss3
            Accuracy_num = 0

        else:
            # step two
            # reconstruct the original data
            # head on block3
            GRU1 = self.GRU1(RepDCBlock3)
            GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
            d1 = self.d1(GRU1)
            # tf.nn.softmax
            output1 = self.output1(d1)
            # head on block4
            GRU2 = self.GRU2(RepDCBlock4)
            GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
            d2 = self.d2(GRU2)
            # tf.nn.softmax
            output2 = self.output2(d2)
            # head on block5
            GRU3 = self.GRU3(RepDCBlock5)
            GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
            d3 = self.d3(GRU3)
            # tf.nn.softmax
            output3 = self.output3(d3)

            # multi-scale dynamic pooling
            # p1 = self.p1(output1)
            # B, _, _ = p1.shape
            # f1 = tf.reshape(p1, shape=[B, -1])
            # p2 = self.p2(output2)
            # f2 = tf.reshape(p2, shape=[B, -1])
            # p3 = self.p3(output3)
            # f3 = tf.reshape(p3, shape=[B, -1])
            # step three
            # classifier
            concat3 = tf.concat([output1, output3], axis=1)
            # dropout = tf.keras.layers.Dropout(0.25)(concat3)
            d4 = self.d4(concat3)
            d5 = self.d5(d4)
            # d4 = tf.keras.layers.BatchNormalization()(d4)
            output4 = self.output4(d5)

            # reduce_mean averages over the reduced dimensions
            MSE_loss = 0
            # MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
            # MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
            # MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
            # note: output4 already passes through a sigmoid, so from_logits=True treats probabilities as logits
            Cross_Entropy_loss = tf.reduce_mean(
                tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))

            print("MSE_loss:", MSE_loss)
            print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
            Accuracy_num = self.get_Accuracy(label=label2, output=output4)
            loss = Cross_Entropy_loss
        return loss, Accuracy_num

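    # Reference sketch (not part of the original file): SmoothL1Loss is imported from
    # model.LossFunction.smooth_L1_Loss, which is not shown in this diff. A standard smooth-L1
    # (Huber-style, delta = 1) loss looks like the following; whether the project's version matches
    # this exactly is an assumption.
    #
    #     def smooth_l1(y_true, y_pred):
    #         diff = tf.abs(y_true - y_pred)
    #         quadratic = tf.minimum(diff, 1.0)
    #         linear = diff - quadratic
    #         return tf.reduce_mean(0.5 * quadratic ** 2 + linear)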
    def get_Accuracy(self, output, label):

        predict_label = tf.round(output)
        label = tf.cast(label, dtype=tf.float32)

        t = np.array(label - predict_label)

        # count the entries where prediction and label agree
        b = t[t[:] == 0]

        return b.__len__()

    def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
                 pred_5=None):
        with tf.GradientTape() as tape:
            # todo: by default the tape only watches trainable variables created via tf.Variable
            # tape.watch(self.variables)
            L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
                                            pred_3=pred_3,
                                            pred_4=pred_4, pred_5=pred_5)
            # keep the loss around so train() can report it
            self.train_loss = L
        g = tape.gradient(L, self.variables)
        return g, Accuracy_num

    def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
              pred_4=None, pred_5=None):
        g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
                                        pred_3=pred_3,
                                        pred_4=pred_4, pred_5=pred_5)
        # note: a fresh Adam optimizer is built on every step, so optimizer state is not carried over
        optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
        return self.train_loss, Accuracy_num

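    # Illustrative sketch (not part of the original file): the intended two-phase use of train().
    # Phase one fits the three reconstruction heads against label1; phase two uses a finished phase-one
    # model to supply pred_3/pred_4/pred_5 (as get_val_loss does below) and fits the classifier against
    # label2. Variable names and batch iterables here are assumptions for illustration.
    #
    #     step_one_model = Joint_Monitoring()
    #     for x_batch, y1_batch in phase_one_batches:          # assumed iterable of (data, label1)
    #         step_one_model.train(x_batch, label1=y1_batch, is_first_time=True)
    #
    #     step_two_model = Joint_Monitoring()
    #     for x_batch, y2_batch in phase_two_batches:          # assumed iterable of (data, label2)
    #         p3, p4, p5, _ = step_one_model.call(x_batch, is_first_time=True)
    #         step_two_model.train(x_batch, label2=y2_batch, is_first_time=False,
    #                              pred_3=p3, pred_4=p4, pred_5=p5)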
    # for now only batch_size == 1 is really supported; otherwise z would have to be passed in, which is awkward
    def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
                     step_one_model=None):
        val_loss = []
        accuracy_num = 0
        output1 = 0
        output2 = 0
        output3 = 0
        z = 1
        size, length, dims = val_data.shape
        if batch_size is None:
            batch_size = self.batch_size
        for epoch in range(0, size - batch_size, batch_size):
            each_val_data = val_data[epoch:epoch + batch_size, :, :]
            each_val_label1 = val_label1[epoch:epoch + batch_size, :]
            each_val_label2 = val_label2[epoch:epoch + batch_size, ]
            # each_val_data = tf.expand_dims(each_val_data, axis=0)
            # each_val_query = tf.expand_dims(each_val_query, axis=0)
            # each_val_label = tf.expand_dims(each_val_label, axis=0)
            if not is_first_time:
                output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)

            each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
                                                         is_first_time=is_first_time,
                                                         pred_3=output1, pred_4=output2, pred_5=output3)
            accuracy_num += each_accuracy_num
            val_loss.append(each_loss)
            z += 1

        val_accuracy = accuracy_num / ((z - 1) * batch_size)
        val_total_loss = tf.reduce_mean(val_loss)
        return val_total_loss, val_accuracy

class RevConv(keras.layers.Layer):

    def __init__(self, kernel_size=3):
        # call the parent class __init__()
        super(RevConv, self).__init__()
        self.kernel_size = kernel_size

    def get_config(self):
        # attributes of this custom layer
        config = (
            {
                'kernel_size': self.kernel_size
            }
        )
        base_config = super(RevConv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        # print(input_shape)
        _, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
        self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
                                            padding='causal',
                                            dilation_rate=4)

        self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
                                            dilation_rate=4)
        # self.b2 = tf.keras.layers.BatchNormalization()

        # self.b3 = tf.keras.layers.BatchNormalization()

        # out = tf.keras.layers.Add()([b1, b2, b3])
        # out = tf.nn.relu(out)

    def call(self, inputs, **kwargs):
        # branch 1: k-sized dilated causal convolution
        conv1 = self.conv1(inputs)
        b1 = tf.keras.layers.BatchNormalization()(conv1)
        b1 = tf.nn.leaky_relu(b1)
        # b1 = self.b1

        # branch 2: 1x1 dilated causal convolution
        conv2 = self.conv2(inputs)
        b2 = tf.keras.layers.BatchNormalization()(conv2)
        b2 = tf.nn.leaky_relu(b2)

        # branch 3: identity path, batch-normalised only
        b3 = tf.keras.layers.BatchNormalization()(inputs)

        out = tf.keras.layers.Add()([b1, b2, b3])
        out = tf.nn.relu(out)

        return out

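# Illustrative sketch (not part of the original file): RevConv is a RepVGG-style block with three parallel
# branches (k-sized dilated causal conv, 1x1 dilated causal conv, and an identity path), each batch-normalised
# and then summed. A quick shape check on dummy data; the shape below is an assumption for illustration.
#
#     layer = RevConv(kernel_size=3)
#     x = tf.random.normal([4, 128, 20])        # [batch, time, channels] (assumed)
#     y = layer(x)
#     print(y.shape)                            # causal stride-1 convs keep the shape: (4, 128, 20)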
class RevConvBlock(keras.layers.Layer):

    def __init__(self, num: int = 3, kernel_size=3):
        # call the parent class __init__()
        super(RevConvBlock, self).__init__()
        self.num = num
        self.kernel_size = kernel_size
        self.L = []
        for i in range(num):
            RepVGG = RevConv(kernel_size=kernel_size)
            self.L.append(RepVGG)

    def get_config(self):
        # attributes of this custom layer
        config = (
            {
                'kernel_size': self.kernel_size,
                'num': self.num
            }
        )
        base_config = super(RevConvBlock, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def call(self, inputs, **kwargs):
        for i in range(self.num):
            inputs = self.L[i](inputs)
        return inputs

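# Illustrative sketch (not part of the original file): RevConvBlock simply chains `num` RevConv layers,
# so the output shape again matches the input. The shape below is an assumption for illustration.
#
#     block = RevConvBlock(num=3, kernel_size=3)
#     y = block(tf.random.normal([4, 128, 20]))
#     print(y.shape)                            # (4, 128, 20)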
@@ -0,0 +1,457 @@
# _*_ coding: UTF-8 _*_


'''
@Author : dingjiawen
@Date : 2022/7/14 9:40
@Usage : joint monitoring model
@Desc : RNet: the DCAU branch only classifies, it does not predict
'''

import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling
from condition_monitoring.data_deal import loadData
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss


class Joint_Monitoring(keras.Model):

    def __init__(self, conv_filter=20):
        # call the parent class __init__()
        super(Joint_Monitoring, self).__init__()
        # step one
        self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
        self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
        self.upsample1 = tf.keras.layers.UpSampling1D(size=2)

        self.DACU2 = DynamicChannelAttention()
        self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
        self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
        self.upsample2 = tf.keras.layers.UpSampling1D(size=2)

        self.DACU3 = DynamicChannelAttention()
        self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
        self.p1 = DynamicPooling(pool_size=2)
        self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')

        self.DACU4 = DynamicChannelAttention()
        self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
        self.p2 = DynamicPooling(pool_size=4)
        self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')

        self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
        self.p3 = DynamicPooling(pool_size=2)

        # step two
        # reconstruct the original data
        self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
        self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)

        self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
        self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)

        self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
        self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)

        # step three
        # classifier
        self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
        # tf.nn.softmax
        self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)

        # loss
        self.train_loss = []

def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
output1 = []
|
||||||
|
output2 = []
|
||||||
|
output3 = []
|
||||||
|
output4 = []
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
else:
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
# concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
concat3 = output2
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
return output1, output2, output3, output4
|
||||||
|
|
||||||
|
def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
|
||||||
|
MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
|
||||||
|
MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
|
||||||
|
# MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
|
||||||
|
# MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
|
||||||
|
# MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
|
||||||
|
|
||||||
|
print("MSE_loss1:", MSE_loss1.numpy())
|
||||||
|
print("MSE_loss2:", MSE_loss2.numpy())
|
||||||
|
print("MSE_loss3:", MSE_loss3.numpy())
|
||||||
|
loss = MSE_loss1 + MSE_loss2 + MSE_loss3
|
||||||
|
Accuracy_num = 0
|
||||||
|
|
||||||
|
else:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
# concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
concat3 = output2
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss= 0
|
||||||
|
# MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
|
||||||
|
Cross_Entropy_loss = tf.reduce_mean(
|
||||||
|
tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))
|
||||||
|
|
||||||
|
print("MSE_loss:", MSE_loss)
|
||||||
|
print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
|
||||||
|
Accuracy_num = self.get_Accuracy(label=label2, output=output4)
|
||||||
|
loss = Cross_Entropy_loss
|
||||||
|
return loss, Accuracy_num
|
||||||
|
|
||||||
|
def get_Accuracy(self, output, label):
|
||||||
|
|
||||||
|
predict_label = tf.round(output)
|
||||||
|
label = tf.cast(label, dtype=tf.float32)
|
||||||
|
|
||||||
|
t = np.array(label - predict_label)
|
||||||
|
|
||||||
|
b = t[t[:] == 0]
|
||||||
|
|
||||||
|
return b.__len__()
|
||||||
|
|
||||||
|
def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
with tf.GradientTape() as tape:
|
||||||
|
# todo 原本tape只会监控由tf.Variable创建的trainable=True属性
|
||||||
|
# tape.watch(self.variables)
|
||||||
|
L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
# 保存一下loss,用于输出
|
||||||
|
self.train_loss = L
|
||||||
|
g = tape.gradient(L, self.variables)
|
||||||
|
return g, Accuracy_num
|
||||||
|
|
||||||
|
def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
|
||||||
|
pred_4=None, pred_5=None):
|
||||||
|
g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
|
||||||
|
return self.train_loss, Accuracy_num
|
||||||
|
|
||||||
|
# 暂时只支持batch_size等于1,不然要传z比较麻烦
|
||||||
|
def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
|
||||||
|
step_one_model=None):
|
||||||
|
val_loss = []
|
||||||
|
accuracy_num = 0
|
||||||
|
output1 = 0
|
||||||
|
output2 = 0
|
||||||
|
output3 = 0
|
||||||
|
z = 1
|
||||||
|
size, length, dims = val_data.shape
|
||||||
|
if batch_size == None:
|
||||||
|
batch_size = self.batch_size
|
||||||
|
for epoch in range(0, size - batch_size, batch_size):
|
||||||
|
each_val_data = val_data[epoch:epoch + batch_size, :, :]
|
||||||
|
each_val_label1 = val_label1[epoch:epoch + batch_size, :]
|
||||||
|
each_val_label2 = val_label2[epoch:epoch + batch_size, ]
|
||||||
|
# each_val_data = tf.expand_dims(each_val_data, axis=0)
|
||||||
|
# each_val_query = tf.expand_dims(each_val_query, axis=0)
|
||||||
|
# each_val_label = tf.expand_dims(each_val_label, axis=0)
|
||||||
|
if not is_first_time:
|
||||||
|
output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)
|
||||||
|
|
||||||
|
each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
|
||||||
|
is_first_time=is_first_time,
|
||||||
|
pred_3=output1, pred_4=output2, pred_5=output3)
|
||||||
|
accuracy_num += each_accuracy_num
|
||||||
|
val_loss.append(each_loss)
|
||||||
|
z += 1
|
||||||
|
|
||||||
|
val_accuracy = accuracy_num / ((z-1) * batch_size)
|
||||||
|
val_total_loss = tf.reduce_mean(val_loss)
|
||||||
|
return val_total_loss, val_accuracy
|
||||||
|
|
||||||
|
|
||||||
|
class RevConv(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConv, self).__init__()
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConv, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def build(self, input_shape):
|
||||||
|
# print(input_shape)
|
||||||
|
_, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
|
||||||
|
padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
# self.b2 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# self.b3 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
# out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
conv1 = self.conv1(inputs)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
b1 = tf.nn.leaky_relu(b1)
|
||||||
|
# b1 = self.b1
|
||||||
|
|
||||||
|
conv2 = self.conv2(inputs)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
b2 = tf.nn.leaky_relu(b2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(inputs)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
class RevConvBlock(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, num: int = 3, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConvBlock, self).__init__()
|
||||||
|
self.num = num
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
self.L = []
|
||||||
|
for i in range(num):
|
||||||
|
RepVGG = RevConv(kernel_size=kernel_size)
|
||||||
|
self.L.append(RepVGG)
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size,
|
||||||
|
'num': self.num
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConvBlock, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
for i in range(self.num):
|
||||||
|
inputs = self.L[i](inputs)
|
||||||
|
return inputs
|
||||||
|
|
@ -0,0 +1,455 @@
|
||||||
|
# _*_ coding: UTF-8 _*_
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/7/14 9:40
|
||||||
|
@Usage : 联合监测模型
|
||||||
|
@Desc : RNet:DCAU只分类,不预测
|
||||||
|
'''
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras as keras
|
||||||
|
from tensorflow.keras import *
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss
|
||||||
|
|
||||||
|
|
||||||
|
class Joint_Monitoring(keras.Model):
|
||||||
|
|
||||||
|
def __init__(self, conv_filter=20):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(Joint_Monitoring, self).__init__()
|
||||||
|
# step one
|
||||||
|
self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample1 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU2 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample2 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU3 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p1 = DynamicPooling(pool_size=2)
|
||||||
|
self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.DACU4 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p2 = DynamicPooling(pool_size=4)
|
||||||
|
self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p3 = DynamicPooling(pool_size=2)
|
||||||
|
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
# tf.nn.softmax
|
||||||
|
self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
|
||||||
|
|
||||||
|
|
||||||
|
# loss
|
||||||
|
self.train_loss = []
|
||||||
|
|
||||||
|
def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
output1 = []
|
||||||
|
output2 = []
|
||||||
|
output3 = []
|
||||||
|
output4 = []
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
else:
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
concat3 = tf.concat([output2, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
return output1, output2, output3, output4
|
||||||
|
|
||||||
|
def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
|
||||||
|
MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
|
||||||
|
MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
|
||||||
|
# MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
|
||||||
|
# MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
|
||||||
|
# MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
|
||||||
|
|
||||||
|
print("MSE_loss1:", MSE_loss1.numpy())
|
||||||
|
print("MSE_loss2:", MSE_loss2.numpy())
|
||||||
|
print("MSE_loss3:", MSE_loss3.numpy())
|
||||||
|
loss = MSE_loss1 + MSE_loss2 + MSE_loss3
|
||||||
|
Accuracy_num = 0
|
||||||
|
|
||||||
|
else:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss= 0
|
||||||
|
# MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
|
||||||
|
# MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
|
||||||
|
Cross_Entropy_loss = tf.reduce_mean(
|
||||||
|
tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))
|
||||||
|
|
||||||
|
print("MSE_loss:", MSE_loss)
|
||||||
|
print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
|
||||||
|
Accuracy_num = self.get_Accuracy(label=label2, output=output4)
|
||||||
|
loss = Cross_Entropy_loss
|
||||||
|
return loss, Accuracy_num
|
||||||
|
|
||||||
|
def get_Accuracy(self, output, label):
|
||||||
|
|
||||||
|
predict_label = tf.round(output)
|
||||||
|
label = tf.cast(label, dtype=tf.float32)
|
||||||
|
|
||||||
|
t = np.array(label - predict_label)
|
||||||
|
|
||||||
|
b = t[t[:] == 0]
|
||||||
|
|
||||||
|
return b.__len__()
|
||||||
|
|
||||||
|
def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
with tf.GradientTape() as tape:
|
||||||
|
# todo 原本tape只会监控由tf.Variable创建的trainable=True属性
|
||||||
|
# tape.watch(self.variables)
|
||||||
|
L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
# 保存一下loss,用于输出
|
||||||
|
self.train_loss = L
|
||||||
|
g = tape.gradient(L, self.variables)
|
||||||
|
return g, Accuracy_num
|
||||||
|
|
||||||
|
def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
|
||||||
|
pred_4=None, pred_5=None):
|
||||||
|
g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
|
||||||
|
return self.train_loss, Accuracy_num
|
||||||
|
|
||||||
|
# 暂时只支持batch_size等于1,不然要传z比较麻烦
|
||||||
|
def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
|
||||||
|
step_one_model=None):
|
||||||
|
val_loss = []
|
||||||
|
accuracy_num = 0
|
||||||
|
output1 = 0
|
||||||
|
output2 = 0
|
||||||
|
output3 = 0
|
||||||
|
z = 1
|
||||||
|
size, length, dims = val_data.shape
|
||||||
|
if batch_size == None:
|
||||||
|
batch_size = self.batch_size
|
||||||
|
for epoch in range(0, size - batch_size, batch_size):
|
||||||
|
each_val_data = val_data[epoch:epoch + batch_size, :, :]
|
||||||
|
each_val_label1 = val_label1[epoch:epoch + batch_size, :]
|
||||||
|
each_val_label2 = val_label2[epoch:epoch + batch_size, ]
|
||||||
|
# each_val_data = tf.expand_dims(each_val_data, axis=0)
|
||||||
|
# each_val_query = tf.expand_dims(each_val_query, axis=0)
|
||||||
|
# each_val_label = tf.expand_dims(each_val_label, axis=0)
|
||||||
|
if not is_first_time:
|
||||||
|
output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)
|
||||||
|
|
||||||
|
each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
|
||||||
|
is_first_time=is_first_time,
|
||||||
|
pred_3=output1, pred_4=output2, pred_5=output3)
|
||||||
|
accuracy_num += each_accuracy_num
|
||||||
|
val_loss.append(each_loss)
|
||||||
|
z += 1
|
||||||
|
|
||||||
|
val_accuracy = accuracy_num / ((z-1) * batch_size)
|
||||||
|
val_total_loss = tf.reduce_mean(val_loss)
|
||||||
|
return val_total_loss, val_accuracy
|
||||||
|
|
||||||
|
|
||||||
|
class RevConv(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConv, self).__init__()
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConv, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def build(self, input_shape):
|
||||||
|
# print(input_shape)
|
||||||
|
_, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
|
||||||
|
padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
# self.b2 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# self.b3 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
# out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
conv1 = self.conv1(inputs)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
b1 = tf.nn.leaky_relu(b1)
|
||||||
|
# b1 = self.b1
|
||||||
|
|
||||||
|
conv2 = self.conv2(inputs)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
b2 = tf.nn.leaky_relu(b2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(inputs)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
class RevConvBlock(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, num: int = 3, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConvBlock, self).__init__()
|
||||||
|
self.num = num
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
self.L = []
|
||||||
|
for i in range(num):
|
||||||
|
RepVGG = RevConv(kernel_size=kernel_size)
|
||||||
|
self.L.append(RepVGG)
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size,
|
||||||
|
'num': self.num
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConvBlock, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
for i in range(self.num):
|
||||||
|
inputs = self.L[i](inputs)
|
||||||
|
return inputs
|
||||||
|
|
@ -0,0 +1,457 @@
|
||||||
|
# _*_ coding: UTF-8 _*_
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
@Author : dingjiawen
|
||||||
|
@Date : 2022/7/14 9:40
|
||||||
|
@Usage : 联合监测模型
|
||||||
|
@Desc : RNet:DCAU只分类,不预测
|
||||||
|
'''
|
||||||
|
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.keras as keras
|
||||||
|
from tensorflow.keras import *
|
||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
|
||||||
|
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling
|
||||||
|
from condition_monitoring.data_deal import loadData
|
||||||
|
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss
|
||||||
|
|
||||||
|
|
||||||
|
class Joint_Monitoring(keras.Model):
|
||||||
|
|
||||||
|
def __init__(self, conv_filter=20):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(Joint_Monitoring, self).__init__()
|
||||||
|
# step one
|
||||||
|
self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample1 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU2 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
|
||||||
|
self.upsample2 = tf.keras.layers.UpSampling1D(size=2)
|
||||||
|
|
||||||
|
self.DACU3 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p1 = DynamicPooling(pool_size=2)
|
||||||
|
self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.DACU4 = DynamicChannelAttention()
|
||||||
|
self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p2 = DynamicPooling(pool_size=4)
|
||||||
|
self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')
|
||||||
|
|
||||||
|
self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
|
||||||
|
self.p3 = DynamicPooling(pool_size=2)
|
||||||
|
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
|
||||||
|
self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
|
||||||
|
self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
|
||||||
|
# tf.nn.softmax
|
||||||
|
self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
|
||||||
|
|
||||||
|
|
||||||
|
# loss
|
||||||
|
self.train_loss = []
|
||||||
|
|
||||||
|
def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
output1 = []
|
||||||
|
output2 = []
|
||||||
|
output3 = []
|
||||||
|
output4 = []
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
else:
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
# concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
concat3 = output3
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
return output1, output2, output3, output4
|
||||||
|
|
||||||
|
    def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
                 pred_5=None):
        # step one
        RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
        RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
        conv1 = self.conv1(RepDCBlock1)
        conv1 = tf.nn.leaky_relu(conv1)
        conv1 = tf.keras.layers.BatchNormalization()(conv1)
        upsample1 = self.upsample1(conv1)

        DACU2 = self.DACU2(upsample1)
        DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
        RepDCBlock2 = self.RepDCBlock2(DACU2)
        RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
        conv2 = self.conv2(RepDCBlock2)
        conv2 = tf.nn.leaky_relu(conv2)
        conv2 = tf.keras.layers.BatchNormalization()(conv2)
        upsample2 = self.upsample2(conv2)

        DACU3 = self.DACU3(upsample2)
        DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
        RepDCBlock3 = self.RepDCBlock3(DACU3)
        RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
        conv3 = self.conv3(RepDCBlock3)
        conv3 = tf.nn.leaky_relu(conv3)
        conv3 = tf.keras.layers.BatchNormalization()(conv3)

        concat1 = tf.concat([conv2, conv3], axis=1)

        DACU4 = self.DACU4(concat1)
        DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
        RepDCBlock4 = self.RepDCBlock4(DACU4)
        RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
        conv4 = self.conv4(RepDCBlock4)
        conv4 = tf.nn.leaky_relu(conv4)
        conv4 = tf.keras.layers.BatchNormalization()(conv4)

        concat2 = tf.concat([conv1, conv4], axis=1)

        RepDCBlock5 = self.RepDCBlock5(concat2)
        RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)

        if is_first_time:
            # step two
            # reconstruct the original data
            # attach to block3
            GRU1 = self.GRU1(RepDCBlock3)
            GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
            d1 = self.d1(GRU1)
            # tf.nn.softmax
            output1 = self.output1(d1)
            # attach to block4
            GRU2 = self.GRU2(RepDCBlock4)
            GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
            d2 = self.d2(GRU2)
            # tf.nn.softmax
            output2 = self.output2(d2)
            # attach to block5
            GRU3 = self.GRU3(RepDCBlock5)
            GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
            d3 = self.d3(GRU3)
            # tf.nn.softmax
            output3 = self.output3(d3)

            # reduce_mean averages over the reduced dimensions
            MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
            MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
            MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
            # MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
            # MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
            # MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))

            print("MSE_loss1:", MSE_loss1.numpy())
            print("MSE_loss2:", MSE_loss2.numpy())
            print("MSE_loss3:", MSE_loss3.numpy())
            loss = MSE_loss1 + MSE_loss2 + MSE_loss3
            Accuracy_num = 0

        else:
            # step two
            # reconstruct the original data
            # attach to block3
            GRU1 = self.GRU1(RepDCBlock3)
            GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
            d1 = self.d1(GRU1)
            # tf.nn.softmax
            output1 = self.output1(d1)
            # attach to block4
            GRU2 = self.GRU2(RepDCBlock4)
            GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
            d2 = self.d2(GRU2)
            # tf.nn.softmax
            output2 = self.output2(d2)
            # attach to block5
            GRU3 = self.GRU3(RepDCBlock5)
            GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
            d3 = self.d3(GRU3)
            # tf.nn.softmax
            output3 = self.output3(d3)

            # multi-scale dynamic pooling
            # p1 = self.p1(output1)
            # B, _, _ = p1.shape
            # f1 = tf.reshape(p1, shape=[B, -1])
            # p2 = self.p2(output2)
            # f2 = tf.reshape(p2, shape=[B, -1])
            # p3 = self.p3(output3)
            # f3 = tf.reshape(p3, shape=[B, -1])
            # step three
            # classifier
            # concat3 = tf.concat([output1, output2, output3], axis=1)
            # dropout = tf.keras.layers.Dropout(0.25)(concat3)
            concat3 = output3
            d4 = self.d4(concat3)
            d5 = self.d5(d4)
            # d4 = tf.keras.layers.BatchNormalization()(d4)
            output4 = self.output4(d5)

            # reduce_mean averages over the reduced dimensions
            MSE_loss = 0
            # MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
            # MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
            # MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
            Cross_Entropy_loss = tf.reduce_mean(
                tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))

            print("MSE_loss:", MSE_loss)
            print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
            Accuracy_num = self.get_Accuracy(label=label2, output=output4)
            loss = Cross_Entropy_loss
        return loss, Accuracy_num

    def get_Accuracy(self, output, label):
        predict_label = tf.round(output)
        label = tf.cast(label, dtype=tf.float32)

        t = np.array(label - predict_label)
        b = t[t[:] == 0]

        return len(b)

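    # Hypothetical helper (not part of the original commit): a purely TensorFlow way to
    # obtain the same count as get_Accuracy, assuming `output` and `label` share the same
    # shape after casting. Shown only as a sketch of an equivalent, vectorised formulation.
    def get_accuracy_num_tf(self, output, label):
        label = tf.cast(label, dtype=tf.float32)
        matches = tf.equal(tf.round(output), label)
        # count of positions where the rounded prediction equals the label
        return int(tf.reduce_sum(tf.cast(matches, tf.int32)).numpy())
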
    def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
                 pred_5=None):
        with tf.GradientTape() as tape:
            # TODO by default the tape only watches trainable=True variables created via tf.Variable
            # tape.watch(self.variables)
            L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
                                            pred_3=pred_3,
                                            pred_4=pred_4, pred_5=pred_5)
            # keep the loss around for logging
            self.train_loss = L
        g = tape.gradient(L, self.variables)
        return g, Accuracy_num

    def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
              pred_4=None, pred_5=None):
        g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
                                        pred_3=pred_3,
                                        pred_4=pred_4, pred_5=pred_5)
        # note: a fresh Adam optimizer is created on every call, so optimizer state
        # (first/second moments) is not carried across training steps
        optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
        return self.train_loss, Accuracy_num

    # For now this only really supports batch_size == 1; otherwise passing z around gets awkward
    def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
                     step_one_model=None):
        val_loss = []
        accuracy_num = 0
        output1 = 0
        output2 = 0
        output3 = 0
        z = 1
        size, length, dims = val_data.shape
        if batch_size is None:
            batch_size = self.batch_size
        for epoch in range(0, size - batch_size, batch_size):
            each_val_data = val_data[epoch:epoch + batch_size, :, :]
            each_val_label1 = val_label1[epoch:epoch + batch_size, :]
            each_val_label2 = val_label2[epoch:epoch + batch_size, ]
            # each_val_data = tf.expand_dims(each_val_data, axis=0)
            # each_val_query = tf.expand_dims(each_val_query, axis=0)
            # each_val_label = tf.expand_dims(each_val_label, axis=0)
            if not is_first_time:
                output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)

            each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
                                                         is_first_time=is_first_time,
                                                         pred_3=output1, pred_4=output2, pred_5=output3)
            accuracy_num += each_accuracy_num
            val_loss.append(each_loss)
            z += 1

        val_accuracy = accuracy_num / ((z - 1) * batch_size)
        val_total_loss = tf.reduce_mean(val_loss)
        return val_total_loss, val_accuracy

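    # Added note (not in the original commit): the loop above walks the validation set in
    # whole batches, so (z - 1) * batch_size is the number of samples actually scored;
    # samples in the final window are skipped because range() stops at size - batch_size.
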
class RevConv(keras.layers.Layer):

    def __init__(self, kernel_size=3):
        # call the parent __init__()
        super(RevConv, self).__init__()
        self.kernel_size = kernel_size

    def get_config(self):
        # attributes of this custom layer
        config = (
            {
                'kernel_size': self.kernel_size
            }
        )
        base_config = super(RevConv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        # print(input_shape)
        _, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
        self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
                                            padding='causal',
                                            dilation_rate=4)

        self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
                                            dilation_rate=4)
        # self.b2 = tf.keras.layers.BatchNormalization()
        # self.b3 = tf.keras.layers.BatchNormalization()
        # out = tf.keras.layers.Add()([b1, b2, b3])
        # out = tf.nn.relu(out)

    def call(self, inputs, **kwargs):
        conv1 = self.conv1(inputs)
        b1 = tf.keras.layers.BatchNormalization()(conv1)
        b1 = tf.nn.leaky_relu(b1)
        # b1 = self.b1

        conv2 = self.conv2(inputs)
        b2 = tf.keras.layers.BatchNormalization()(conv2)
        b2 = tf.nn.leaky_relu(b2)

        b3 = tf.keras.layers.BatchNormalization()(inputs)

        out = tf.keras.layers.Add()([b1, b2, b3])
        out = tf.nn.relu(out)

        return out

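# Added note (not in the original commit): RevConv mirrors the RepVGG idea of a
# three-branch block, here a k-sized dilated causal Conv1D, a 1x1 Conv1D and a bare
# BatchNorm identity branch, whose outputs are summed and passed through ReLU. Because
# the BatchNormalization layers are instantiated inside call(), fresh untracked
# normalization layers are created on every forward pass, which is a caveat to be aware
# of rather than a feature of the design.
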
class RevConvBlock(keras.layers.Layer):

    def __init__(self, num: int = 3, kernel_size=3):
        # call the parent __init__()
        super(RevConvBlock, self).__init__()
        self.num = num
        self.kernel_size = kernel_size
        self.L = []
        for i in range(num):
            RepVGG = RevConv(kernel_size=kernel_size)
            self.L.append(RepVGG)

    def get_config(self):
        # attributes of this custom layer
        config = (
            {
                'kernel_size': self.kernel_size,
                'num': self.num
            }
        )
        base_config = super(RevConvBlock, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def call(self, inputs, **kwargs):
        for i in range(self.num):
            inputs = self.L[i](inputs)
        return inputs

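# Minimal smoke test (illustrative addition, not part of the original commit): chain three
# RevConv layers via RevConvBlock and check that the (batch, length, channels) shape is
# preserved, since every branch keeps the channel count equal to the input's.
if __name__ == '__main__':
    _x = tf.random.normal([2, 100, 20])
    _block = RevConvBlock(num=3, kernel_size=3)
    print(_block(_x).shape)  # expected: (2, 100, 20)
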
@ -0,0 +1,447 @@
# _*_ coding: UTF-8 _*_


'''
@Author : dingjiawen
@Date : 2022/7/14 9:40
@Usage : joint monitoring model
@Desc : RNet:LCAU
'''

import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.SE_channelAttention import SEChannelAttention, DynamicPooling
from condition_monitoring.data_deal import loadData
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss


class Joint_Monitoring(keras.Model):

    def __init__(self, conv_filter=20):
        # call the parent __init__()
        super(Joint_Monitoring, self).__init__()
        # step one
        self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
        self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
        self.upsample1 = tf.keras.layers.UpSampling1D(size=2)

        self.DACU2 = SEChannelAttention()
        self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
        self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
        self.upsample2 = tf.keras.layers.UpSampling1D(size=2)

        self.DACU3 = SEChannelAttention()
        self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
        self.p1 = DynamicPooling(pool_size=2)
        self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')

        self.DACU4 = SEChannelAttention()
        self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
        self.p2 = DynamicPooling(pool_size=4)
        self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')

        self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
        self.p3 = DynamicPooling(pool_size=2)

        # step two
        # reconstruct the original data
        self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
        self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)

        self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
        self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)

        self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
        self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)

        # loss
        self.train_loss = []
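        # Assumed addition (not in the original commit): call() and get_loss() below refer
        # to self.d4, self.d5 and self.output4, which this __init__ never defines, so the
        # second stage would raise AttributeError as committed. The layers below are a
        # placeholder classifier head; the sizes are guesses, and only the single-unit
        # logit output is implied by the binary_crossentropy(from_logits=True) loss.
        self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
        self.d5 = tf.keras.layers.Dense(100, activation=tf.nn.leaky_relu)
        self.output4 = tf.keras.layers.Dense(1, activation=None)
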
def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
output1 = []
|
||||||
|
output2 = []
|
||||||
|
output3 = []
|
||||||
|
output4 = []
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
else:
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
return output1, output2, output3, output4
|
||||||
|
|
||||||
|
def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
# step one
|
||||||
|
RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
|
||||||
|
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
|
||||||
|
conv1 = self.conv1(RepDCBlock1)
|
||||||
|
conv1 = tf.nn.leaky_relu(conv1)
|
||||||
|
conv1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
upsample1 = self.upsample1(conv1)
|
||||||
|
|
||||||
|
DACU2 = self.DACU2(upsample1)
|
||||||
|
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
|
||||||
|
RepDCBlock2 = self.RepDCBlock2(DACU2)
|
||||||
|
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
|
||||||
|
conv2 = self.conv2(RepDCBlock2)
|
||||||
|
conv2 = tf.nn.leaky_relu(conv2)
|
||||||
|
conv2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
upsample2 = self.upsample2(conv2)
|
||||||
|
|
||||||
|
DACU3 = self.DACU3(upsample2)
|
||||||
|
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
|
||||||
|
RepDCBlock3 = self.RepDCBlock3(DACU3)
|
||||||
|
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
|
||||||
|
conv3 = self.conv3(RepDCBlock3)
|
||||||
|
conv3 = tf.nn.leaky_relu(conv3)
|
||||||
|
conv3 = tf.keras.layers.BatchNormalization()(conv3)
|
||||||
|
|
||||||
|
concat1 = tf.concat([conv2, conv3], axis=1)
|
||||||
|
|
||||||
|
DACU4 = self.DACU4(concat1)
|
||||||
|
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
|
||||||
|
RepDCBlock4 = self.RepDCBlock4(DACU4)
|
||||||
|
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
|
||||||
|
conv4 = self.conv4(RepDCBlock4)
|
||||||
|
conv4 = tf.nn.leaky_relu(conv4)
|
||||||
|
conv4 = tf.keras.layers.BatchNormalization()(conv4)
|
||||||
|
|
||||||
|
concat2 = tf.concat([conv1, conv4], axis=1)
|
||||||
|
|
||||||
|
RepDCBlock5 = self.RepDCBlock5(concat2)
|
||||||
|
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
|
||||||
|
|
||||||
|
if is_first_time:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
|
||||||
|
MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
|
||||||
|
MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
|
||||||
|
# MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
|
||||||
|
# MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
|
||||||
|
# MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
|
||||||
|
|
||||||
|
print("MSE_loss1:", MSE_loss1.numpy())
|
||||||
|
print("MSE_loss2:", MSE_loss2.numpy())
|
||||||
|
print("MSE_loss3:", MSE_loss3.numpy())
|
||||||
|
loss = MSE_loss1 + MSE_loss2 + MSE_loss3
|
||||||
|
Accuracy_num = 0
|
||||||
|
|
||||||
|
else:
|
||||||
|
# step two
|
||||||
|
# 重现原数据
|
||||||
|
# 接block3
|
||||||
|
GRU1 = self.GRU1(RepDCBlock3)
|
||||||
|
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
|
||||||
|
d1 = self.d1(GRU1)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output1 = self.output1(d1)
|
||||||
|
# 接block4
|
||||||
|
GRU2 = self.GRU2(RepDCBlock4)
|
||||||
|
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
|
||||||
|
d2 = self.d2(GRU2)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output2 = self.output2(d2)
|
||||||
|
# 接block5
|
||||||
|
GRU3 = self.GRU3(RepDCBlock5)
|
||||||
|
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
|
||||||
|
d3 = self.d3(GRU3)
|
||||||
|
# tf.nn.softmax
|
||||||
|
output3 = self.output3(d3)
|
||||||
|
|
||||||
|
# 多尺度动态池化
|
||||||
|
# p1 = self.p1(output1)
|
||||||
|
# B, _, _ = p1.shape
|
||||||
|
# f1 = tf.reshape(p1, shape=[B, -1])
|
||||||
|
# p2 = self.p2(output2)
|
||||||
|
# f2 = tf.reshape(p2, shape=[B, -1])
|
||||||
|
# p3 = self.p3(output3)
|
||||||
|
# f3 = tf.reshape(p3, shape=[B, -1])
|
||||||
|
# step three
|
||||||
|
# 分类器
|
||||||
|
concat3 = tf.concat([output1, output2, output3], axis=1)
|
||||||
|
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
|
||||||
|
d4 = self.d4(concat3)
|
||||||
|
d5 = self.d5(d4)
|
||||||
|
# d4 = tf.keras.layers.BatchNormalization()(d4)
|
||||||
|
output4 = self.output4(d5)
|
||||||
|
|
||||||
|
# reduce_mean降维计算均值
|
||||||
|
MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
|
||||||
|
MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
|
||||||
|
MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
|
||||||
|
Cross_Entropy_loss = tf.reduce_mean(
|
||||||
|
tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))
|
||||||
|
|
||||||
|
print("MSE_loss:", MSE_loss.numpy())
|
||||||
|
print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
|
||||||
|
Accuracy_num = self.get_Accuracy(label=label2, output=output4)
|
||||||
|
loss = MSE_loss + Cross_Entropy_loss
|
||||||
|
return loss, Accuracy_num
|
||||||
|
|
||||||
|
def get_Accuracy(self, output, label):
|
||||||
|
|
||||||
|
predict_label = tf.round(output)
|
||||||
|
label = tf.cast(label, dtype=tf.float32)
|
||||||
|
|
||||||
|
t = np.array(label - predict_label)
|
||||||
|
|
||||||
|
b = t[t[:] == 0]
|
||||||
|
|
||||||
|
return b.__len__()
|
||||||
|
|
||||||
|
def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
|
||||||
|
pred_5=None):
|
||||||
|
with tf.GradientTape() as tape:
|
||||||
|
# todo 原本tape只会监控由tf.Variable创建的trainable=True属性
|
||||||
|
# tape.watch(self.variables)
|
||||||
|
L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
# 保存一下loss,用于输出
|
||||||
|
self.train_loss = L
|
||||||
|
g = tape.gradient(L, self.variables)
|
||||||
|
return g, Accuracy_num
|
||||||
|
|
||||||
|
def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
|
||||||
|
pred_4=None, pred_5=None):
|
||||||
|
g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
|
||||||
|
pred_3=pred_3,
|
||||||
|
pred_4=pred_4, pred_5=pred_5)
|
||||||
|
optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
|
||||||
|
return self.train_loss, Accuracy_num
|
||||||
|
|
||||||
|
# 暂时只支持batch_size等于1,不然要传z比较麻烦
|
||||||
|
def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
|
||||||
|
step_one_model=None):
|
||||||
|
val_loss = []
|
||||||
|
accuracy_num = 0
|
||||||
|
output1 = 0
|
||||||
|
output2 = 0
|
||||||
|
output3 = 0
|
||||||
|
z = 1
|
||||||
|
size, length, dims = val_data.shape
|
||||||
|
if batch_size == None:
|
||||||
|
batch_size = self.batch_size
|
||||||
|
for epoch in range(0, size - batch_size, batch_size):
|
||||||
|
each_val_data = val_data[epoch:epoch + batch_size, :, :]
|
||||||
|
each_val_label1 = val_label1[epoch:epoch + batch_size, :]
|
||||||
|
each_val_label2 = val_label2[epoch:epoch + batch_size, ]
|
||||||
|
# each_val_data = tf.expand_dims(each_val_data, axis=0)
|
||||||
|
# each_val_query = tf.expand_dims(each_val_query, axis=0)
|
||||||
|
# each_val_label = tf.expand_dims(each_val_label, axis=0)
|
||||||
|
if not is_first_time:
|
||||||
|
output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)
|
||||||
|
|
||||||
|
each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
|
||||||
|
is_first_time=is_first_time,
|
||||||
|
pred_3=output1, pred_4=output2, pred_5=output3)
|
||||||
|
accuracy_num += each_accuracy_num
|
||||||
|
val_loss.append(each_loss)
|
||||||
|
z += 1
|
||||||
|
|
||||||
|
val_accuracy = accuracy_num / ((z-1) * batch_size)
|
||||||
|
val_total_loss = tf.reduce_mean(val_loss)
|
||||||
|
return val_total_loss, val_accuracy
|
||||||
|
|
||||||
|
|
||||||
|
class RevConv(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConv, self).__init__()
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConv, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def build(self, input_shape):
|
||||||
|
# print(input_shape)
|
||||||
|
_, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
|
||||||
|
self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
|
||||||
|
padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
|
||||||
|
self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
|
||||||
|
dilation_rate=4)
|
||||||
|
# self.b2 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# self.b3 = tf.keras.layers.BatchNormalization()
|
||||||
|
|
||||||
|
# out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
# out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
conv1 = self.conv1(inputs)
|
||||||
|
b1 = tf.keras.layers.BatchNormalization()(conv1)
|
||||||
|
b1 = tf.nn.leaky_relu(b1)
|
||||||
|
# b1 = self.b1
|
||||||
|
|
||||||
|
conv2 = self.conv2(inputs)
|
||||||
|
b2 = tf.keras.layers.BatchNormalization()(conv2)
|
||||||
|
b2 = tf.nn.leaky_relu(b2)
|
||||||
|
|
||||||
|
b3 = tf.keras.layers.BatchNormalization()(inputs)
|
||||||
|
|
||||||
|
out = tf.keras.layers.Add()([b1, b2, b3])
|
||||||
|
out = tf.nn.relu(out)
|
||||||
|
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
class RevConvBlock(keras.layers.Layer):
|
||||||
|
|
||||||
|
def __init__(self, num: int = 3, kernel_size=3):
|
||||||
|
# 调用父类__init__()方法
|
||||||
|
super(RevConvBlock, self).__init__()
|
||||||
|
self.num = num
|
||||||
|
self.kernel_size = kernel_size
|
||||||
|
self.L = []
|
||||||
|
for i in range(num):
|
||||||
|
RepVGG = RevConv(kernel_size=kernel_size)
|
||||||
|
self.L.append(RepVGG)
|
||||||
|
|
||||||
|
def get_config(self):
|
||||||
|
# 自定义层里面的属性
|
||||||
|
config = (
|
||||||
|
{
|
||||||
|
'kernel_size': self.kernel_size,
|
||||||
|
'num': self.num
|
||||||
|
}
|
||||||
|
)
|
||||||
|
base_config = super(RevConvBlock, self).get_config()
|
||||||
|
return dict(list(base_config.items()) + list(config.items()))
|
||||||
|
|
||||||
|
def call(self, inputs, **kwargs):
|
||||||
|
for i in range(self.num):
|
||||||
|
inputs = self.L[i](inputs)
|
||||||
|
return inputs
|
||||||
|
|
@ -0,0 +1,133 @@
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
import seaborn as sns

# load the data
train_data = np.load("../../../data/train_data.npy")
train_label = np.load("../../../data/train_label.npy")
test_data = np.load("../../../data/test_data.npy")
test_label = np.load("../../../data/test_label.npy")


# CIFAR_100_data = tf.keras.datasets.cifar100
# (train_data, train_label), (test_data, test_label) = CIFAR_100_data.load_data()
# train_data = np.array(train_data)
# train_label = np.array(train_label)
# print(train_data.shape)
# print(train_label.shape)
# print(train_data)
# print(test_data)


def identity_block(input_tensor, out_dim):
    con1 = tf.keras.layers.Conv2D(filters=out_dim // 4, kernel_size=1, padding='SAME', activation=tf.nn.relu)(
        input_tensor)
    bhn1 = tf.keras.layers.BatchNormalization()(con1)

    con2 = tf.keras.layers.Conv2D(filters=out_dim // 4, kernel_size=3, padding='SAME', activation=tf.nn.relu)(bhn1)
    bhn2 = tf.keras.layers.BatchNormalization()(con2)

    con3 = tf.keras.layers.Conv2D(filters=out_dim, kernel_size=1, padding='SAME', activation=tf.nn.relu)(bhn2)

    out = tf.keras.layers.Add()([input_tensor, con3])
    out = tf.nn.relu(out)

    return out

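# Added note (not in the original commit): identity_block is a bottleneck residual block
# (1x1 -> 3x3 -> 1x1 convolutions plus a skip connection). Because the skip is a plain
# Add, the input tensor must already carry out_dim channels; resnet_Model() below
# therefore widens the channels with a separate Conv2D before each group of blocks.
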
def resnet_Model():
    inputs = tf.keras.Input(shape=[80, 80, 9])
    conv1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='SAME', activation=tf.nn.relu)(inputs)
    '''first stage'''
    output_dim = 64
    identity_1 = tf.keras.layers.Conv2D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
        conv1)
    identity_1 = tf.keras.layers.BatchNormalization()(identity_1)
    for _ in range(3):
        identity_1 = identity_block(identity_1, output_dim)
    '''second stage'''
    output_dim = 128
    identity_2 = tf.keras.layers.Conv2D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
        identity_1)
    identity_2 = tf.keras.layers.BatchNormalization()(identity_2)
    for _ in range(2):
        identity_2 = identity_block(identity_2, output_dim)
    '''third stage'''
    output_dim = 256
    identity_3 = tf.keras.layers.Conv2D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
        identity_2)
    identity_3 = tf.keras.layers.BatchNormalization()(identity_3)
    for _ in range(3):
        identity_3 = identity_block(identity_3, output_dim)
    '''fourth stage'''
    output_dim = 512
    identity_4 = tf.keras.layers.Conv2D(filters=output_dim, kernel_size=3, padding='SAME', activation=tf.nn.relu)(
        identity_3)
    identity_4 = tf.keras.layers.BatchNormalization()(identity_4)
    for _ in range(3):
        identity_4 = identity_block(identity_4, output_dim)
    flatten = tf.keras.layers.Flatten()(identity_4)
    dropout = tf.keras.layers.Dropout(0.217)(flatten)

    dense = tf.keras.layers.Dense(128, activation=tf.nn.relu)(dropout)
    dense = tf.keras.layers.BatchNormalization(name="bn_last")(dense)
    dense = tf.keras.layers.Dense(9, activation=tf.nn.softmax)(dense)
    model = tf.keras.Model(inputs=inputs, outputs=dense)
    return model


if __name__ == '__main__':
    model = resnet_Model()
    model.compile(optimizer=tf.optimizers.SGD(1e-3, momentum=0.02), loss=tf.losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    model.summary()
    history = model.fit(train_data, train_label, epochs=10, batch_size=10, validation_data=(test_data, test_label))
    model.save("ResNet_model.h5")
    # model = tf.keras.models.load_model("../model/ResNet_model.h5")

    # The t-SNE scatter and the confusion-matrix heatmap below need these intermediate
    # results, so they are computed here from the freshly trained model (in the committed
    # version these lines were commented out, which made the plotting code fail with a
    # NameError on trained_data / confusion_matrix).
    trained_data = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer('bn_last').output).predict(
        train_data)
    predict_label = model.predict(test_data)
    predict_label_max = np.argmax(predict_label, axis=1)
    predict_label = np.expand_dims(predict_label_max, axis=1)

    confusion_matrix = confusion_matrix(test_label, predict_label)

    tsne = TSNE(n_components=3, verbose=2, perplexity=30, n_iter=5000).fit_transform(trained_data)
    print("tsne[:,0]", tsne[:, 0])
    print("tsne[:,1]", tsne[:, 1])
    print("tsne[:,2]", tsne[:, 2])
    x, y, z = tsne[:, 0], tsne[:, 1], tsne[:, 2]
    x = (x - np.min(x)) / (np.max(x) - np.min(x))
    y = (y - np.min(y)) / (np.max(y) - np.min(y))
    z = (z - np.min(z)) / (np.max(z) - np.min(z))

    fig1 = plt.figure()
    ax1 = fig1.add_subplot(projection='3d')
    ax1.scatter3D(x, y, z, c=train_label, cmap=plt.cm.get_cmap("jet", 10))

    fig2 = plt.figure()
    ax2 = fig2.add_subplot()
    sns.heatmap(confusion_matrix, annot=True, fmt="d", cmap='Blues')
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')

    # fig3 = plt.figure()
    # ax3 = fig3.add_subplot()
    # plt.plot(history.epoch, history.history.get('acc'), label='acc')
    # plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc')
    #
    # fig4 = plt.figure()
    # ax4 = fig3.add_subplot()
    # plt.plot(history.epoch, history.history.get('loss'), label='loss')
    # plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss')
    plt.legend()
    plt.show()

    score = model.evaluate(test_data, test_label)
    print('score:', score)