From 60a7641c65bbde130f762e1d9558495096ad7790 Mon Sep 17 00:00:00 2001
From: markilue <745518019@qq.com>
Date: Mon, 17 Oct 2022 13:26:09 +0800
Subject: [PATCH] leecode update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../leecode/backtrace/RestoreIpAddresses.java | 168 +++++
.../self_try/compare/RNet-D.py | 680 +++++++++++++++++-
.../self_try/compare/RNet-L.py | 650 ++++++++++++++++-
.../self_try/compare/RNet.py | 172 +++--
.../Light_channelAttention.py | 122 ++++
.../model/Joint_Monitoring/compare/RNet_L.py | 447 ++++++++++++
6 files changed, 2173 insertions(+), 66 deletions(-)
create mode 100644 Leecode/src/main/java/com/markilue/leecode/backtrace/RestoreIpAddresses.java
create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/Light_channelAttention.py
create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet_L.py
diff --git a/Leecode/src/main/java/com/markilue/leecode/backtrace/RestoreIpAddresses.java b/Leecode/src/main/java/com/markilue/leecode/backtrace/RestoreIpAddresses.java
new file mode 100644
index 0000000..65977b2
--- /dev/null
+++ b/Leecode/src/main/java/com/markilue/leecode/backtrace/RestoreIpAddresses.java
@@ -0,0 +1,168 @@
+package com.markilue.leecode.backtrace;
+
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * @BelongsProject: Leecode
+ * @BelongsPackage: com.markilue.leecode.backtrace
+ * @Author: markilue
+ * @CreateTime: 2022-10-17 10:25
+ * @Description: TODO LeetCode problem 93, Restore IP Addresses:
+ * A valid IP address consists of exactly four integers (each between 0 and 255, with no leading zeros), separated by single dots '.'.
+ *
+ * For example, "0.1.2.201" and "192.168.1.1" are valid IP addresses, while "0.011.255.245", "192.168.1.312" and "192.168@1.1" are invalid.
+ * Given a string s containing only digits, return all possible valid IP addresses that can be formed by inserting dots into s. You may not reorder or remove any digits in s. The answers may be returned in any order.
+ * @Version: 1.0
+ */
+public class RestoreIpAddresses {
+
+ @Test
+ public void test1() {
+ String s = "25525511135";
+ System.out.println(restoreIpAddresses(s));
+ }
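+
+ // For reference, test1 above should print the two valid restorations of "25525511135"
+ // (in some order): [255.255.11.135, 255.255.111.35].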
+
+ @Test
+ public void test2() {
+ String s = "0000";
+ System.out.println(restoreIpAddresses(s));
+ }
+
+ @Test
+ public void test3() {
+ String s = "101023";
+ System.out.println(restoreIpAddresses(s));
+ }
+
+
+ @Test
+ public void test4() {
+ String s = "0279245587303";
+ System.out.println(restoreIpAddresses(s));
+ }
+
+ /**
+ * Backtracking: similar to partition (palindrome partitioning); only the validity check applied to each cut segment differs.
+ * Runtime beats 95.41% of submissions, memory beats 43.81%.
+ * @param s
+ * @return
+ */
+ public List<String> restoreIpAddresses(String s) {
+ if(s.length()>12){
+ return result;
+ }
+ backtracking(s, 0, 0);
+ return result;
+
+ }
+
+ List<String> result = new ArrayList<>();
+ StringBuilder builder = new StringBuilder();
+
+ public void backtracking(String s, int start, int num) {
+
+// if (start >= s.length()) {
+// result.add(builder.toString());
+// return;
+// }
+
+ if (num == 3) {
+ if (valid(s, start, s.length())) {
+ String substring = s.substring(start, s.length());
+ builder.append(substring);
+ result.add(builder.toString());
+ builder.delete(builder.length()-substring.length(),builder.length());
+ return;
+ }
+ return;
+ }
+
+ for (int i = start+1; i < s.length() && i < start + 4; i++) {
+ if (valid(s, start, i)) {
+ builder.append(s.substring(start, i));
+ builder.append(".");
+
+ }else {
+ return;
+ }
+ backtracking(s, i , num + 1);
+ builder.delete(builder.length() - (i-start)-1, builder.length());
+ }
+
+ }
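+
+ // Note: each segment is 1-3 digits and there are at most 4 segments, so the search tree has at
+ // most 3^4 = 81 leaves; the total work is bounded by O(3^4 * |s|).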
+
+ @Test
+ public void test() {
+ String s = "03";
+// System.out.println(s.substring(0, 0));
+ System.out.println(valid(s, 0, s.length()));
+ }
+
+ public boolean valid(String s, int start, int end) {
+ String substring = s.substring(start, end);
+ // reject a segment with a leading zero
+ if(substring.length()>1&&substring.startsWith("0")){
+ return false;
+ }
+ int num = Integer.parseInt(substring);
+ return num >= 0 && num <= 255;
+ }
+
+
+ /**
+ * Official (editorial) backtracking solution
+ */
+ static final int SEG_COUNT = 4;
+ List<String> ans = new ArrayList<String>();
+ int[] segments = new int[SEG_COUNT];
+
+ public List<String> restoreIpAddresses1(String s) {
+ segments = new int[SEG_COUNT];
+ dfs(s, 0, 0);
+ return ans;
+ }
+
+ public void dfs(String s, int segId, int segStart) {
+ // If 4 segments have been found and the whole string has been consumed, this is one valid answer
+ if (segId == SEG_COUNT) {
+ if (segStart == s.length()) {
+ StringBuffer ipAddr = new StringBuffer();
+ for (int i = 0; i < SEG_COUNT; ++i) {
+ ipAddr.append(segments[i]);
+ if (i != SEG_COUNT - 1) {
+ ipAddr.append('.');
+ }
+ }
+ ans.add(ipAddr.toString());
+ }
+ return;
+ }
+
+ // If the string is exhausted before 4 segments have been found, backtrack early
+ if (segStart == s.length()) {
+ return;
+ }
+
+ // Leading zeros are not allowed, so if the current digit is 0 this segment can only be "0"
+ if (s.charAt(segStart) == '0') {
+ segments[segId] = 0;
+ dfs(s, segId + 1, segStart + 1);
+ }
+
+ // General case: enumerate every possible segment and recurse
+ int addr = 0;
+ for (int segEnd = segStart; segEnd < s.length(); ++segEnd) {
+ addr = addr * 10 + (s.charAt(segEnd) - '0');
+ if (addr > 0 && addr <= 0xFF) {
+ segments[segId] = addr;
+ dfs(s, segId + 1, segEnd + 1);
+ } else {
+ break;
+ }
+ }
+ }
+
+}
diff --git a/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-D.py b/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-D.py
index 5cd109b..fdf88c3 100644
--- a/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-D.py
+++ b/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-D.py
@@ -6,5 +6,681 @@
@Author : dingjiawen
@Date : 2022/10/11 18:54
@Usage :
-@Desc :
-'''
\ No newline at end of file
+@Desc : Our model: prediction only (no classification), using the 3-sigma rule
+'''
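+
+# Note on the 3-sigma rule as used below: get_MSE() computes a per-sample error statistic on the
+# healthy data and sets the alarm threshold to mean + 3 * std of that statistic; in getResult(),
+# samples whose statistic exceeds this threshold are counted as faults.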
+
+import tensorflow as tf
+import tensorflow.keras
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
+from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
+from condition_monitoring.data_deal import loadData
+from model.Joint_Monitoring.Joint_Monitoring3 import Joint_Monitoring
+
+from model.CommonFunction.CommonFunction import *
+from sklearn.model_selection import train_test_split
+from tensorflow.keras.models import load_model, save_model
+import random
+
+'''Hyperparameter settings'''
+time_stamp = 120
+feature_num = 10
+batch_size = 16
+learning_rate = 0.001
+EPOCH = 101
+model_name = "RNet_D"
+'''EWMA hyperparameters'''
+K = 18
+namuda = 0.01
+'''Save paths'''
+save_name = "E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\hard_model\weight\joint_timestamp120_feature10_weight_epoch11_0.0077/weight"
+# save_name = "../hard_model/weight/{0}_timestamp{1}_feature{2}_weight_epoch11_0.0077/weight".format(model_name,
+# time_stamp,
+# feature_num,
+# batch_size,
+# EPOCH)
+save_step_two_name = "../hard_model/two_weight/{0}_timestamp{1}_feature{2}_weight_epoch14/weight".format(model_name,
+ time_stamp,
+ feature_num,
+ batch_size,
+ EPOCH)
+
+save_mse_name = "./mse/RNet_D/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
+ time_stamp,
+ feature_num,
+ batch_size,
+ EPOCH)
+save_max_name = "./mse/RNet_D/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
+ time_stamp,
+ feature_num,
+ batch_size,
+ EPOCH)
+
+# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
+# time_stamp,
+# feature_num,
+# batch_size,
+# EPOCH)
+# save_step_two_name = "../model/joint_two/{0}_timestamp{1}_feature{2}.h5".format(model_name,
+# time_stamp,
+# feature_num,
+# batch_size,
+# EPOCH)
+'''Data file'''
+file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
+
+'''
+File notes: jb4q_8_delete_total_zero.csv is the file in which only the all-zero columns were removed.
+Rows 0:415548 are all normal values (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
+Rows 415549:432153 are all abnormal values (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
+'''
+'''File parameters'''
+# last normal row index
+healthy_date = 415548
+# last abnormal row index
+unhealthy_date = 432153
+# abnormality tolerance
+unhealthy_patience = 5
+
+
+def remove(data, time_stamp=time_stamp):
+ rows, cols = data.shape
+ print("remove_data.shape:", data.shape)
+ num = int(rows / time_stamp)
+
+ return data[:num * time_stamp, :]
+ pass
+
+
+# Non-overlapping sampling
+def get_training_data(data, time_stamp: int = time_stamp):
+ removed_data = remove(data=data)
+ rows, cols = removed_data.shape
+ print("removed_data.shape:", data.shape)
+ print("removed_data:", removed_data)
+ train_data = np.reshape(removed_data, [-1, time_stamp, cols])
+ print("train_data:", train_data)
+ batchs, time_stamp, cols = train_data.shape
+
+ for i in range(1, batchs):
+ each_label = np.expand_dims(train_data[i, 0, :], axis=0)
+ if i == 1:
+ train_label = each_label
+ else:
+ train_label = np.concatenate([train_label, each_label], axis=0)
+
+ print("train_data.shape:", train_data.shape)
+ print("train_label.shape", train_label.shape)
+ return train_data[:-1, :], train_label
+
+
+# Overlapping (sliding-window) sampling
+def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
+ rows, cols = data.shape
+ train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
+ train_label = np.empty(shape=[rows - time_stamp - 1, cols])
+ for i in range(rows):
+ if i + time_stamp >= rows:
+ break
+ if i + time_stamp < rows - 1:
+ train_data[i] = data[i:i + time_stamp]
+ train_label[i] = data[i + time_stamp]
+
+ print("重叠采样以后:")
+ print("data:", train_data) # (300334,120,10)
+ print("label:", train_label) # (300334,10)
+
+ if is_Healthy:
+ train_label2 = np.ones(shape=[train_label.shape[0]])
+ else:
+ train_label2 = np.zeros(shape=[train_label.shape[0]])
+
+ print("label2:", train_label2)
+
+ return train_data, train_label, train_label2
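+
+# Worked example of the sliding window above (with time_stamp = 3 and rows r0..r5): the produced
+# samples are (r0, r1, r2) -> label r3 and (r1, r2, r3) -> label r4, i.e. each window of time_stamp
+# consecutive rows is labelled with the row that immediately follows it (one-step-ahead prediction);
+# the pre-allocated length rows - time_stamp - 1 leaves the last possible window unused.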
+
+
+# RepConv: re-parameterizable convolution (k-size conv + 1x1 conv + identity, each with BN)
+def RepConv(input_tensor, k=3):
+ _, _, output_dim = input_tensor.shape
+ conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
+ b1 = tf.keras.layers.BatchNormalization()(conv1)
+
+ conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
+ b2 = tf.keras.layers.BatchNormalization()(conv2)
+
+ b3 = tf.keras.layers.BatchNormalization()(input_tensor)
+
+ out = tf.keras.layers.Add()([b1, b2, b3])
+ out = tf.nn.relu(out)
+ return out
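+
+# Note: this follows the RepVGG-style structural re-parameterization idea; at inference time the
+# three parallel branches could in principle be fused into a single convolution, although no fusion
+# step is implemented in this script.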
+
+
+# RepBlock: a stack of num RepConv blocks
+def RepBlock(input_tensor, num: int = 3):
+ for i in range(num):
+ input_tensor = RepConv(input_tensor)
+ return input_tensor
+
+
+# GAP: channel attention based on global average pooling
+def Global_avg_channelAttention(input_tensor):
+ _, length, channel = input_tensor.shape
+ DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
+ GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
+ c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
+ s1 = tf.nn.sigmoid(c1)
+ output = tf.multiply(input_tensor, s1)
+ return output
+
+
+# GDP: channel attention based on global dynamic pooling
+def Global_Dynamic_channelAttention(input_tensor):
+ _, length, channel = input_tensor.shape
+ DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
+
+ # GAP
+ GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
+ c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
+ s1 = tf.nn.sigmoid(c1)
+
+ # GMP
+ GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
+ c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
+ s3 = tf.nn.sigmoid(c2)
+
+ output = tf.multiply(input_tensor, s1)
+ return output
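+
+# NOTE: in Global_Dynamic_channelAttention above, the GMP branch (c2 / s3) is computed but never
+# used; the returned output is weighted only by the GAP branch (s1).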
+
+
+# Min-max normalization
+def normalization(data):
+ rows, cols = data.shape
+ print("归一化之前:", data)
+ print(data.shape)
+ print("======================")
+
+ # min-max scale to [0, 1]
+ max = np.max(data, axis=0)
+ max = np.broadcast_to(max, [rows, cols])
+ min = np.min(data, axis=0)
+ min = np.broadcast_to(min, [rows, cols])
+
+ data = (data - min) / (max - min)
+ print("归一化之后:", data)
+ print(data.shape)
+
+ return data
+
+
+# Standardization (z-score)
+def Regularization(data):
+ rows, cols = data.shape
+ print("正则化之前:", data)
+ print(data.shape)
+ print("======================")
+
+ # z-score standardization
+ mean = np.mean(data, axis=0)
+ mean = np.broadcast_to(mean, shape=[rows, cols])
+ dst = np.sqrt(np.var(data, axis=0))
+ dst = np.broadcast_to(dst, shape=[rows, cols])
+ data = (data - mean) / dst
+ print("正则化之后:", data)
+ print(data.shape)
+
+ return data
+ pass
+
+
+def EWMA(data, K=K, namuda=namuda):
+ # the meaning of t is not yet determined; it is fixed to 0 here
+ t = 0
+ mid = np.mean(data, axis=0)
+ standard = np.sqrt(np.var(data, axis=0))
+ UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
+ LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
+ return mid, UCL, LCL
+ pass
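+
+# For reference, the standard time-varying EWMA control limits are
+#   UCL_t = mu + K * sigma * sqrt(lambda / (2 - lambda) * (1 - (1 - lambda)^(2t)))
+# (symmetrically for LCL_t). In the expression above, (1 - namuda) ** 2 * t evaluates to 0 for t = 0,
+# so the factor under the square root becomes 1 and the code effectively returns the steady-state
+# limits UCL/LCL = mu +/- K * sigma * sqrt(lambda / (2 - lambda)).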
+
+
+
+
+
+def condition_monitoring_model():
+ input = tf.keras.Input(shape=[time_stamp, feature_num])
+ conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
+ GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
+ d1 = tf.keras.layers.Dense(300)(GRU1)
+ output = tf.keras.layers.Dense(10)(d1)
+
+ model = tf.keras.Model(inputs=input, outputs=output)
+
+ return model
+
+
+# train_data:   (300455, 120, 10)
+# train_label1: (300455, 10)
+# train_label2: (300455,)
+def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
+ (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
+ train_label1,
+ train_label2,
+ test_size=split_size,
+ shuffle=True,
+ random_state=100)
+ if is_split:
+ return train_data, train_label1, train_label2, test_data, test_label1, test_label2
+ train_data = np.concatenate([train_data, test_data], axis=0)
+ train_label1 = np.concatenate([train_label1, test_label1], axis=0)
+ train_label2 = np.concatenate([train_label2, test_label2], axis=0)
+ # print(train_data.shape)
+ # print(train_label1.shape)
+ # print(train_label2.shape)
+ # print(train_data.shape)
+
+ return train_data, train_label1, train_label2
+ pass
+
+
+def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
+ split_size: float = 0.2, shuffle: bool = True):
+ data = np.concatenate([healthy_data, unhealthy_data], axis=0)
+ label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
+ label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
+ (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
+ label1,
+ label2,
+ test_size=split_size,
+ shuffle=shuffle,
+ random_state=100)
+
+ # print(train_data.shape)
+ # print(train_label1.shape)
+ # print(train_label2.shape)
+ # print(train_data.shape)
+
+ return train_data, train_label1, train_label2, test_data, test_label1, test_label2
+
+ pass
+
+
+# train_data:   (300455, 120, 10)
+# train_label1: (300455, 10)
+# train_label2: (300455,)
+def train_step_one(train_data, train_label1, train_label2):
+ model = Joint_Monitoring()
+ # # # # TODO the model must be built/called once before model.summary() can be printed
+ # model.build(input_shape=(batch_size, filter_num, dims))
+ # model.summary()
+ history_loss = []
+ history_val_loss = []
+ learning_rate = 1e-3
+ for epoch in range(EPOCH):
+
+ print()
+ print("EPOCH:", epoch, "/", EPOCH, ":")
+ train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
+ if epoch == 0:
+ train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
+ train_label2,
+ is_split=True)
+ # print()
+ # print("EPOCH:", epoch, "/", EPOCH, ":")
+ # z: index of the current training step (batch) within this epoch
+ z = 0
+ # k: sample counter; a training step is run once every batch_size samples
+ k = 1
+ for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
+ size, _, _ = train_data.shape
+ data_1 = tf.expand_dims(data_1, axis=0)
+ label_1 = tf.expand_dims(label_1, axis=0)
+ label_2 = tf.expand_dims(label_2, axis=0)
+ if batch_size != 1:
+ if k % batch_size == 1:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+ else:
+ data = tf.concat([data, data_1], axis=0)
+ label1 = tf.concat([label1, label_1], axis=0)
+ label2 = tf.concat([label2, label_2], axis=0)
+ else:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+
+ if k % batch_size == 0:
+ # label = tf.expand_dims(label, axis=-1)
+ loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
+ learning_rate=learning_rate,
+ is_first_time=True)
+ print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
+ k = 0
+ z = z + 1
+ k = k + 1
+ val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
+ is_first_time=True)
+ SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
+ # SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
+ history_val_loss.append(val_loss)
+ history_loss.append(loss_value.numpy())
+ print('Training loss is :', loss_value.numpy())
+ print('Validating loss is :', val_loss.numpy())
+ if IsStopTraining(history_loss=history_val_loss, patience=7):
+ break
+ if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
+ if learning_rate >= 1e-4:
+ learning_rate = learning_rate * 0.1
+ pass
+
+
+def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
+ # step_two_model = Joint_Monitoring()
+ # step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
+ # step_two_model.summary()
+ history_loss = []
+ history_val_loss = []
+ history_accuracy = []
+ learning_rate = 1e-3
+ for epoch in range(EPOCH):
+ print()
+ print("EPOCH:", epoch, "/", EPOCH, ":")
+ train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
+ if epoch == 0:
+ train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
+ train_label2,
+ is_split=True)
+ # print()
+ # print("EPOCH:", epoch, "/", EPOCH, ":")
+ # z: index of the current training step (batch) within this epoch
+ z = 0
+ # k: sample counter; a training step is run once every batch_size samples
+ k = 1
+ accuracy_num = 0
+ for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
+ size, _, _ = train_data.shape
+ data_1 = tf.expand_dims(data_1, axis=0)
+ label_1 = tf.expand_dims(label_1, axis=0)
+ label_2 = tf.expand_dims(label_2, axis=0)
+ if batch_size != 1:
+ if k % batch_size == 1:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+ else:
+ data = tf.concat([data, data_1], axis=0)
+ label1 = tf.concat([label1, label_1], axis=0)
+ label2 = tf.concat([label2, label_2], axis=0)
+ else:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+
+ if k % batch_size == 0:
+ # label = tf.expand_dims(label, axis=-1)
+ output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
+ loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
+ learning_rate=learning_rate,
+ is_first_time=False, pred_3=output1, pred_4=output2,
+ pred_5=output3)
+ accuracy_num += accuracy_value
+ print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
+ accuracy_num / ((z + 1) * batch_size))
+ k = 0
+ z = z + 1
+ k = k + 1
+
+ val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
+ val_label2=val_label2,
+ is_first_time=False, step_one_model=step_one_model)
+ SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
+ accuracy_value=val_accuracy)
+ history_val_loss.append(val_loss)
+ history_loss.append(loss_value.numpy())
+ history_accuracy.append(val_accuracy)
+ print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
+ accuracy_num / ((z + 1) * batch_size)))
+ print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
+ if IsStopTraining(history_loss=history_val_loss, patience=7):
+ break
+ if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
+ if learning_rate >= 1e-4:
+ learning_rate = learning_rate * 0.1
+ pass
+
+
+def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
+ history_loss = []
+ history_val_loss = []
+
+ val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
+ val_label2=test_label2,
+ is_first_time=False, step_one_model=step_one_model)
+
+ history_val_loss.append(val_loss)
+ print("val_accuracy:", val_accuracy)
+ print("val_loss:", val_loss)
+
+
+def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False):
+ # get the total number of model parameters
+ # step_two_model.count_params()
+ total_result = []
+ size, length, dims = test_data.shape
+ for epoch in range(0, size - batch_size + 1, batch_size):
+ each_test_data = test_data[epoch:epoch + batch_size, :, :]
+ _, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
+ total_result.append(output4)
+ total_result = np.reshape(total_result, [total_result.__len__(), -1])
+ total_result = np.reshape(total_result, [-1, ])
+ if isPlot:
+ plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
+ # draw the failure-threshold horizontal line at y = 0.5
+ plt.axhline(0.5, c='red', label='Failure threshold')
+ # (disabled) arrow pointing at the horizontal line above
+ # plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
+ # alpha=0.9, overhang=0.5)
+ # plt.text(35000, 0.9, "Truth Fault", fontsize=10, color='black', verticalalignment='top')
+ plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
+ plt.xlabel("time")
+ plt.ylabel("confience")
+ plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
+ horizontalalignment='center',
+ bbox={'facecolor': 'grey',
+ 'pad': 10})
+ plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
+ horizontalalignment='center',
+ bbox={'facecolor': 'grey',
+ 'pad': 10})
+ plt.grid()
+ # plt.ylim(0, 1)
+ # plt.xlim(-50, 1300)
+ # plt.legend("", loc='upper left')
+ plt.show()
+ return total_result
+
+
+def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
+ predicted_data1 = []
+ predicted_data2 = []
+ predicted_data3 = []
+ size, length, dims = data.shape
+ for epoch in range(0, size, batch_size):
+ each_test_data = data[epoch:epoch + batch_size, :, :]
+ output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
+ if epoch == 0:
+ predicted_data1 = output1
+ predicted_data2 = output2
+ predicted_data3 = output3
+ else:
+ predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
+ predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
+ predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
+
+ predicted_data1 = np.reshape(predicted_data1, [-1, 10])
+ predicted_data2 = np.reshape(predicted_data2, [-1, 10])
+ predicted_data3 = np.reshape(predicted_data3, [-1, 10])
+ predict_data = 0
+
+ predict_data = predicted_data1
+ mseList = []
+ meanList = []
+ maxList = []
+
+ for i in range(1, 4):
+ print("i:", i)
+ if i == 1:
+ predict_data = predicted_data1
+ elif i == 2:
+ predict_data = predicted_data2
+ elif i == 3:
+ predict_data = predicted_data3
+ temp = np.abs(predict_data - label)
+ temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
+ temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
+ temp3 = temp1 / temp2
+ mse = np.sum((temp1 / temp2) ** 2, axis=1)
+
+ print("mse.shape:", mse.shape)
+ # mse=np.mean((predicted_data-label)**2,axis=1)
+ # print("mse", mse)
+ mseList.append(mse)
+ if isStandard:
+ dims, = mse.shape
+ mean = np.mean(mse)
+ std = np.sqrt(np.var(mse))
+ max = mean + 3 * std
+ print("max.shape:", max.shape)
+ # min = mean-3*std
+ max = np.broadcast_to(max, shape=[dims, ])
+ # min = np.broadcast_to(min,shape=[dims,])
+ mean = np.broadcast_to(mean, shape=[dims, ])
+ if isPlot:
+ plt.figure(random.randint(1, 100))
+ plt.plot(max)
+ plt.plot(mse)
+ plt.plot(mean)
+ # plt.plot(min)
+ plt.show()
+ maxList.append(max)
+ meanList.append(mean)
+ else:
+ if isPlot:
+ plt.figure(random.randint(1, 100))
+ plt.plot(mse)
+ # plt.plot(min)
+ plt.show()
+
+ return mseList, meanList, maxList
+ # pass
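+
+# For reference, the per-sample statistic computed in get_MSE above is a sum of squared z-scores of
+# the absolute prediction errors: with e = |prediction - label|,
+#   mse_i = sum_j ((e_ij - mu_j) / sigma_j) ** 2
+# where mu_j and sigma_j are the mean and standard deviation of e over the evaluated samples for
+# feature j (so "mse" here is not a plain mean squared error).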
+
+
+# healthy_data is the healthy data used to determine the threshold; all_data is the complete data used to produce the model results
+def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
+ isSave: bool = False, predictI: int = 1):
+ # TODO compute the MSE statistic to determine the threshold
+ # plt.ion()
+ mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
+ mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
+
+ for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
+
+ # false-alarm rate: healthy samples whose statistic exceeds the threshold
+ total, = mse.shape
+ faultNum = 0
+ faultList = []
+ for i in range(total):
+ if (mse[i] > max[i]):
+ faultNum += 1
+ faultList.append(mse[i])
+
+ fault_rate = faultNum / total
+ print("误报率:", fault_rate)
+
+ # missed-detection rate: unhealthy samples whose statistic stays below the threshold
+ missNum = 0
+ missList = []
+ all, = mse1.shape
+ for i in range(all):
+ if (mse1[i] < max[0]):
+ missNum += 1
+ missList.append(mse1[i])
+
+ miss_rate = missNum / all
+ print("漏报率:", miss_rate)
+
+ # overall plot
+ print("mse:", mse)
+ print("mse1:", mse1)
+ print("============================================")
+ total_mse = np.concatenate([mse, mse1], axis=0)
+ total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
+ # min = np.broadcast_to(min,shape=[dims,])
+ total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
+
+ if isSave:
+ save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
+ save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
+
+ np.savetxt(save_mse_name1,total_mse, delimiter=',')
+ np.savetxt(save_max_name1,total_max, delimiter=',')
+
+
+ plt.figure(random.randint(1, 100))
+ plt.plot(total_max)
+ plt.plot(total_mse)
+ plt.plot(total_mean)
+ # plt.plot(min)
+ plt.show()
+ pass
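+
+# Definitions used in getResult above (per prediction head):
+#   false-alarm rate      = |{i : mse_healthy[i]  > threshold}| / N_healthy
+#   missed-detection rate = |{i : mse_unhealthy[i] < threshold}| / N_unhealthy
+# where threshold = mean(mse_healthy) + 3 * std(mse_healthy) as computed in get_MSE.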
+
+
+if __name__ == '__main__':
+ total_data = loadData.execute(N=feature_num, file_name=file_name)
+ total_data = normalization(data=total_data)
+ train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
+ total_data[:healthy_date, :], is_Healthy=True)
+ train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
+ total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
+ is_Healthy=False)
+ #### TODO step-one training
+ # quick single-batch test
+ # train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
+ # train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
+
+ # load the model already trained in step one: one copy continues training, the other only produces outputs
+ step_one_model = Joint_Monitoring()
+ step_one_model.load_weights(save_name)
+ #
+ # step_two_model = Joint_Monitoring()
+ # step_two_model.load_weights(save_name)
+
+ healthy_size, _, _ = train_data_healthy.shape
+ unhealthy_size, _, _ = train_data_unhealthy.shape
+ all_data, _, _ = get_training_data_overlapping(
+ total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
+
+ ## quick single test of the result generation
+ # getResult(step_one_model,
+ # healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
+ # :],
+ # healthy_label=train_label1_healthy[
+ # healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
+ # unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
+
+ getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
+ healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
+ unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy,isSave=True)
+
+ ### TODO show all results
+ # all_data, _, _ = get_training_data_overlapping(
+ # total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
+ # all_data = np.concatenate([])
+ # quick single-batch test
+ # showResult(step_two_model, test_data=all_data[:32], isPlot=True)
+ # showResult(step_two_model, test_data=all_data, isPlot=True)
+
+ pass
diff --git a/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-L.py b/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-L.py
index 18442fc..a214cb9 100644
--- a/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-L.py
+++ b/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet-L.py
@@ -2,9 +2,655 @@
# coding: utf-8
+
'''
@Author : dingjiawen
@Date : 2022/10/11 18:53
-@Usage :
+@Usage : Comparison experiment: same depth as JointNet, using LCAU, for prediction
@Desc :
-'''
\ No newline at end of file
+'''
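+
+# Note: this comparison script mirrors RNet-D.py, but imports Joint_Monitoring from
+# model.Joint_Monitoring.compare.RNet_L (the LCAU-based variant) and actually runs step-one
+# training in __main__ before loading the saved weights and evaluating with getResult().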
+
+import tensorflow as tf
+import tensorflow.keras
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
+from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
+from condition_monitoring.data_deal import loadData
+from model.Joint_Monitoring.compare.RNet_L import Joint_Monitoring
+
+from model.CommonFunction.CommonFunction import *
+from sklearn.model_selection import train_test_split
+from tensorflow.keras.models import load_model, save_model
+import random
+
+'''Hyperparameter settings'''
+time_stamp = 120
+feature_num = 10
+batch_size = 16
+learning_rate = 0.001
+EPOCH = 101
+model_name = "RNet_L"
+'''EWMA hyperparameters'''
+K = 18
+namuda = 0.01
+'''Save paths'''
+
+save_name = "./model/weight/{0}_timestamp{1}_feature{2}_weight/weight".format(model_name,
+ time_stamp,
+ feature_num,
+ batch_size,
+ EPOCH)
+save_step_two_name = "./model/two_weight/{0}_timestamp{1}_feature{2}_weight/weight".format(model_name,
+ time_stamp,
+ feature_num,
+ batch_size,
+ EPOCH)
+
+# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
+# time_stamp,
+# feature_num,
+# batch_size,
+# EPOCH)
+# save_step_two_name = "../model/joint_two/{0}_timestamp{1}_feature{2}.h5".format(model_name,
+# time_stamp,
+# feature_num,
+# batch_size,
+# EPOCH)
+'''Data file'''
+file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
+
+'''
+File notes: jb4q_8_delete_total_zero.csv is the file in which only the all-zero columns were removed.
+Rows 0:415548 are all normal values (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
+Rows 415549:432153 are all abnormal values (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
+'''
+'''File parameters'''
+# last normal row index
+healthy_date = 415548
+# last abnormal row index
+unhealthy_date = 432153
+# abnormality tolerance
+unhealthy_patience = 5
+
+
+def remove(data, time_stamp=time_stamp):
+ rows, cols = data.shape
+ print("remove_data.shape:", data.shape)
+ num = int(rows / time_stamp)
+
+ return data[:num * time_stamp, :]
+ pass
+
+
+# Non-overlapping sampling
+def get_training_data(data, time_stamp: int = time_stamp):
+ removed_data = remove(data=data)
+ rows, cols = removed_data.shape
+ print("removed_data.shape:", data.shape)
+ print("removed_data:", removed_data)
+ train_data = np.reshape(removed_data, [-1, time_stamp, cols])
+ print("train_data:", train_data)
+ batchs, time_stamp, cols = train_data.shape
+
+ for i in range(1, batchs):
+ each_label = np.expand_dims(train_data[i, 0, :], axis=0)
+ if i == 1:
+ train_label = each_label
+ else:
+ train_label = np.concatenate([train_label, each_label], axis=0)
+
+ print("train_data.shape:", train_data.shape)
+ print("train_label.shape", train_label.shape)
+ return train_data[:-1, :], train_label
+
+
+# Overlapping (sliding-window) sampling
+def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
+ rows, cols = data.shape
+ train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
+ train_label = np.empty(shape=[rows - time_stamp - 1, cols])
+ for i in range(rows):
+ if i + time_stamp >= rows:
+ break
+ if i + time_stamp < rows - 1:
+ train_data[i] = data[i:i + time_stamp]
+ train_label[i] = data[i + time_stamp]
+
+ print("重叠采样以后:")
+ print("data:", train_data) # (300334,120,10)
+ print("label:", train_label) # (300334,10)
+
+ if is_Healthy:
+ train_label2 = np.ones(shape=[train_label.shape[0]])
+ else:
+ train_label2 = np.zeros(shape=[train_label.shape[0]])
+
+ print("label2:", train_label2)
+
+ return train_data, train_label, train_label2
+
+
+# RepConv: re-parameterizable convolution (k-size conv + 1x1 conv + identity, each with BN)
+def RepConv(input_tensor, k=3):
+ _, _, output_dim = input_tensor.shape
+ conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
+ b1 = tf.keras.layers.BatchNormalization()(conv1)
+
+ conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
+ b2 = tf.keras.layers.BatchNormalization()(conv2)
+
+ b3 = tf.keras.layers.BatchNormalization()(input_tensor)
+
+ out = tf.keras.layers.Add()([b1, b2, b3])
+ out = tf.nn.relu(out)
+ return out
+
+
+# RepBlock: a stack of num RepConv blocks
+def RepBlock(input_tensor, num: int = 3):
+ for i in range(num):
+ input_tensor = RepConv(input_tensor)
+ return input_tensor
+
+
+# GAP: channel attention based on global average pooling
+def Global_avg_channelAttention(input_tensor):
+ _, length, channel = input_tensor.shape
+ DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
+ GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
+ c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
+ s1 = tf.nn.sigmoid(c1)
+ output = tf.multiply(input_tensor, s1)
+ return output
+
+
+# GDP: channel attention based on global dynamic pooling
+def Global_Dynamic_channelAttention(input_tensor):
+ _, length, channel = input_tensor.shape
+ DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
+
+ # GAP
+ GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
+ c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
+ s1 = tf.nn.sigmoid(c1)
+
+ # GMP
+ GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
+ c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
+ s3 = tf.nn.sigmoid(c2)
+
+ output = tf.multiply(input_tensor, s1)
+ return output
+
+
+# Min-max normalization
+def normalization(data):
+ rows, cols = data.shape
+ print("归一化之前:", data)
+ print(data.shape)
+ print("======================")
+
+ # min-max scale to [0, 1]
+ max = np.max(data, axis=0)
+ max = np.broadcast_to(max, [rows, cols])
+ min = np.min(data, axis=0)
+ min = np.broadcast_to(min, [rows, cols])
+
+ data = (data - min) / (max - min)
+ print("归一化之后:", data)
+ print(data.shape)
+
+ return data
+
+
+# Standardization (z-score)
+def Regularization(data):
+ rows, cols = data.shape
+ print("正则化之前:", data)
+ print(data.shape)
+ print("======================")
+
+ # z-score standardization
+ mean = np.mean(data, axis=0)
+ mean = np.broadcast_to(mean, shape=[rows, cols])
+ dst = np.sqrt(np.var(data, axis=0))
+ dst = np.broadcast_to(dst, shape=[rows, cols])
+ data = (data - mean) / dst
+ print("正则化之后:", data)
+ print(data.shape)
+
+ return data
+ pass
+
+
+def EWMA(data, K=K, namuda=namuda):
+ # the meaning of t is not yet determined; it is fixed to 0 here
+ t = 0
+ mid = np.mean(data, axis=0)
+ standard = np.sqrt(np.var(data, axis=0))
+ UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
+ LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
+ return mid, UCL, LCL
+ pass
+
+
+# train_data:   (300455, 120, 10)
+# train_label1: (300455, 10)
+# train_label2: (300455,)
+def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
+ (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
+ train_label1,
+ train_label2,
+ test_size=split_size,
+ shuffle=True,
+ random_state=100)
+ if is_split:
+ return train_data, train_label1, train_label2, test_data, test_label1, test_label2
+ train_data = np.concatenate([train_data, test_data], axis=0)
+ train_label1 = np.concatenate([train_label1, test_label1], axis=0)
+ train_label2 = np.concatenate([train_label2, test_label2], axis=0)
+ # print(train_data.shape)
+ # print(train_label1.shape)
+ # print(train_label2.shape)
+ # print(train_data.shape)
+
+ return train_data, train_label1, train_label2
+ pass
+
+
+def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
+ split_size: float = 0.2, shuffle: bool = True):
+ data = np.concatenate([healthy_data, unhealthy_data], axis=0)
+ label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
+ label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
+ (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
+ label1,
+ label2,
+ test_size=split_size,
+ shuffle=shuffle,
+ random_state=100)
+
+ # print(train_data.shape)
+ # print(train_label1.shape)
+ # print(train_label2.shape)
+ # print(train_data.shape)
+
+ return train_data, train_label1, train_label2, test_data, test_label1, test_label2
+
+ pass
+
+
+# train_data:   (300455, 120, 10)
+# train_label1: (300455, 10)
+# train_label2: (300455,)
+def train_step_one(train_data, train_label1, train_label2):
+ model = Joint_Monitoring()
+ # # # # TODO the model must be built/called once before model.summary() can be printed
+ # model.build(input_shape=(batch_size, filter_num, dims))
+ # model.summary()
+ history_loss = []
+ history_val_loss = []
+ learning_rate = 1e-3
+ for epoch in range(EPOCH):
+
+ print()
+ print("EPOCH:", epoch, "/", EPOCH, ":")
+ train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
+ if epoch == 0:
+ train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
+ train_label2,
+ is_split=True)
+ # print()
+ # print("EPOCH:", epoch, "/", EPOCH, ":")
+ # z: index of the current training step (batch) within this epoch
+ z = 0
+ # k: sample counter; a training step is run once every batch_size samples
+ k = 1
+ for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
+ size, _, _ = train_data.shape
+ data_1 = tf.expand_dims(data_1, axis=0)
+ label_1 = tf.expand_dims(label_1, axis=0)
+ label_2 = tf.expand_dims(label_2, axis=0)
+ if batch_size != 1:
+ if k % batch_size == 1:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+ else:
+ data = tf.concat([data, data_1], axis=0)
+ label1 = tf.concat([label1, label_1], axis=0)
+ label2 = tf.concat([label2, label_2], axis=0)
+ else:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+
+ if k % batch_size == 0:
+ # label = tf.expand_dims(label, axis=-1)
+ loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
+ learning_rate=learning_rate,
+ is_first_time=True)
+ print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
+ k = 0
+ z = z + 1
+ k = k + 1
+ val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
+ is_first_time=True)
+ SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
+ # SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
+ history_val_loss.append(val_loss)
+ history_loss.append(loss_value.numpy())
+ print('Training loss is :', loss_value.numpy())
+ print('Validating loss is :', val_loss.numpy())
+ if IsStopTraining(history_loss=history_val_loss, patience=7):
+ break
+ if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
+ if learning_rate >= 1e-4:
+ learning_rate = learning_rate * 0.1
+ pass
+
+
+def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
+ # step_two_model = Joint_Monitoring()
+ # step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
+ # step_two_model.summary()
+ history_loss = []
+ history_val_loss = []
+ history_accuracy = []
+ learning_rate = 1e-3
+ for epoch in range(EPOCH):
+ print()
+ print("EPOCH:", epoch, "/", EPOCH, ":")
+ train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
+ if epoch == 0:
+ train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
+ train_label2,
+ is_split=True)
+ # print()
+ # print("EPOCH:", epoch, "/", EPOCH, ":")
+ # z: index of the current training step (batch) within this epoch
+ z = 0
+ # k: sample counter; a training step is run once every batch_size samples
+ k = 1
+ accuracy_num = 0
+ for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
+ size, _, _ = train_data.shape
+ data_1 = tf.expand_dims(data_1, axis=0)
+ label_1 = tf.expand_dims(label_1, axis=0)
+ label_2 = tf.expand_dims(label_2, axis=0)
+ if batch_size != 1:
+ if k % batch_size == 1:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+ else:
+ data = tf.concat([data, data_1], axis=0)
+ label1 = tf.concat([label1, label_1], axis=0)
+ label2 = tf.concat([label2, label_2], axis=0)
+ else:
+ data = data_1
+ label1 = label_1
+ label2 = label_2
+
+ if k % batch_size == 0:
+ # label = tf.expand_dims(label, axis=-1)
+ output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
+ loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
+ learning_rate=learning_rate,
+ is_first_time=False, pred_3=output1, pred_4=output2,
+ pred_5=output3)
+ accuracy_num += accuracy_value
+ print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
+ accuracy_num / ((z + 1) * batch_size))
+ k = 0
+ z = z + 1
+ k = k + 1
+
+ val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
+ val_label2=val_label2,
+ is_first_time=False, step_one_model=step_one_model)
+ SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
+ accuracy_value=val_accuracy)
+ history_val_loss.append(val_loss)
+ history_loss.append(loss_value.numpy())
+ history_accuracy.append(val_accuracy)
+ print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
+ accuracy_num / ((z + 1) * batch_size)))
+ print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
+ if IsStopTraining(history_loss=history_val_loss, patience=7):
+ break
+ if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
+ if learning_rate >= 1e-4:
+ learning_rate = learning_rate * 0.1
+ pass
+
+
+def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
+ history_loss = []
+ history_val_loss = []
+
+ val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
+ val_label2=test_label2,
+ is_first_time=False, step_one_model=step_one_model)
+
+ history_val_loss.append(val_loss)
+ print("val_accuracy:", val_accuracy)
+ print("val_loss:", val_loss)
+
+
+def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False):
+ # get the total number of model parameters
+ # step_two_model.count_params()
+ total_result = []
+ size, length, dims = test_data.shape
+ for epoch in range(0, size - batch_size + 1, batch_size):
+ each_test_data = test_data[epoch:epoch + batch_size, :, :]
+ _, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
+ total_result.append(output4)
+ total_result = np.reshape(total_result, [total_result.__len__(), -1])
+ total_result = np.reshape(total_result, [-1, ])
+ if isPlot:
+ plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
+ # draw the failure-threshold horizontal line at y = 0.5
+ plt.axhline(0.5, c='red', label='Failure threshold')
+ # (disabled) arrow pointing at the horizontal line above
+ # plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
+ # alpha=0.9, overhang=0.5)
+ # plt.text(35000, 0.9, "Truth Fault", fontsize=10, color='black', verticalalignment='top')
+ plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
+ plt.xlabel("time")
+ plt.ylabel("confience")
+ plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
+ horizontalalignment='center',
+ bbox={'facecolor': 'grey',
+ 'pad': 10})
+ plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
+ horizontalalignment='center',
+ bbox={'facecolor': 'grey',
+ 'pad': 10})
+ plt.grid()
+ # plt.ylim(0, 1)
+ # plt.xlim(-50, 1300)
+ # plt.legend("", loc='upper left')
+ plt.show()
+ return total_result
+
+
+def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
+ predicted_data1 = []
+ predicted_data2 = []
+ predicted_data3 = []
+ size, length, dims = data.shape
+ for epoch in range(0, size, batch_size):
+ each_test_data = data[epoch:epoch + batch_size, :, :]
+ output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
+ if epoch == 0:
+ predicted_data1 = output1
+ predicted_data2 = output2
+ predicted_data3 = output3
+ else:
+ predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
+ predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
+ predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
+
+ predicted_data1 = np.reshape(predicted_data1, [-1, 10])
+ predicted_data2 = np.reshape(predicted_data2, [-1, 10])
+ predicted_data3 = np.reshape(predicted_data3, [-1, 10])
+ predict_data = 0
+
+ predict_data = predicted_data1
+ mseList = []
+ meanList = []
+ maxList = []
+
+ for i in range(1, 4):
+ print("i:", i)
+ if i == 1:
+ predict_data = predicted_data1
+ elif i == 2:
+ predict_data = predicted_data2
+ elif i == 3:
+ predict_data = predicted_data3
+ temp = np.abs(predict_data - label)
+ temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
+ temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
+ temp3 = temp1 / temp2
+ mse = np.sum((temp1 / temp2) ** 2, axis=1)
+
+ print("mse.shape:", mse.shape)
+ # mse=np.mean((predicted_data-label)**2,axis=1)
+ # print("mse", mse)
+ mseList.append(mse)
+ if isStandard:
+ dims, = mse.shape
+ mean = np.mean(mse)
+ std = np.sqrt(np.var(mse))
+ max = mean + 3 * std
+ print("max.shape:", max.shape)
+ # min = mean-3*std
+ max = np.broadcast_to(max, shape=[dims, ])
+ # min = np.broadcast_to(min,shape=[dims,])
+ mean = np.broadcast_to(mean, shape=[dims, ])
+ if isPlot:
+ plt.figure(random.randint(1, 9))
+ plt.plot(max)
+ plt.plot(mse)
+ plt.plot(mean)
+ # plt.plot(min)
+ plt.show()
+ maxList.append(max)
+ meanList.append(mean)
+ else:
+ if isPlot:
+ plt.figure(random.randint(1, 9))
+ plt.plot(mse)
+ # plt.plot(min)
+ plt.show()
+
+ return mseList, meanList, maxList
+ # pass
+
+
+# healthy_data is the healthy data used to determine the threshold; all_data is the complete data used to produce the model results
+def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
+ isSave: bool = False, predictI: int = 1):
+ # TODO compute the MSE statistic to determine the threshold
+
+ mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
+ mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
+
+ for mse, mean, max, mse1 in zip(mseList, meanList, maxList, mse1List):
+
+ # false-alarm rate: healthy samples whose statistic exceeds the threshold
+ total, = mse.shape
+ faultNum = 0
+ faultList = []
+ for i in range(total):
+ if (mse[i] > max[i]):
+ faultNum += 1
+ faultList.append(mse[i])
+
+ fault_rate = faultNum / total
+ print("误报率:", fault_rate)
+
+ # missed-detection rate: unhealthy samples whose statistic stays below the threshold
+ missNum = 0
+ missList = []
+ all, = mse1.shape
+ for i in range(all):
+ if (mse1[i] < max[0]):
+ missNum += 1
+ missList.append(mse1[i])
+
+ miss_rate = missNum / all
+ print("漏报率:", miss_rate)
+
+ # overall plot
+ print("mse:", mse)
+ print("mse1:", mse1)
+ print("============================================")
+ total_mse = np.concatenate([mse, mse1], axis=0)
+ total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
+ # min = np.broadcast_to(min,shape=[dims,])
+ total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
+
+ plt.figure(random.randint(1, 9))
+ plt.plot(total_max)
+ plt.plot(total_mse)
+ plt.plot(total_mean)
+ # plt.plot(min)
+ plt.show()
+ pass
+
+
+if __name__ == '__main__':
+ total_data = loadData.execute(N=feature_num, file_name=file_name)
+ total_data = normalization(data=total_data)
+ train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
+ total_data[:healthy_date, :], is_Healthy=True)
+ train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
+ total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
+ is_Healthy=False)
+ #### TODO step-one training
+ # quick single-batch test
+ # train_step_one(train_data=train_data_healthy[:256, :, :], train_label1=train_label1_healthy[:256, :],
+ # train_label2=train_label2_healthy[:256, ])
+ #### model training
+ train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
+
+ # load the model already trained in step one: one copy continues training, the other only produces outputs
+ step_one_model = Joint_Monitoring()
+ step_one_model.load_weights(save_name)
+ #
+ # step_two_model = Joint_Monitoring()
+ # step_two_model.load_weights(save_name)
+
+ # #### TODO compute the MSE statistic
+ healthy_size, _, _ = train_data_healthy.shape
+ unhealthy_size, _, _ = train_data_unhealthy.shape
+ all_data, _, _ = get_training_data_overlapping(
+ total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
+
+ ## quick single test of the result generation
+ # getResult(step_one_model,
+ # healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
+ # :],
+ # healthy_label=train_label1_healthy[
+ # healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
+ # unhealthy_data=train_data_unhealthy[:200,:], unhealthy_label=train_label1_unhealthy[:200,:])
+
+ getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
+ healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
+ unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy)
+
+ # ### TODO show all results
+ # all_data, _, _ = get_training_data_overlapping(
+ # total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
+ # # all_data = np.concatenate([])
+ # # quick single-batch test
+ # # showResult(step_two_model, test_data=all_data[:32], isPlot=True)
+ # showResult(step_two_model, test_data=all_data, isPlot=True)
+
+ pass
diff --git a/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet.py b/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet.py
index 3850c4b..e578290 100644
--- a/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet.py
+++ b/TensorFlow_eaxmple/Model_train_test/condition_monitoring/self_try/compare/RNet.py
@@ -477,91 +477,131 @@ def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False
return total_result
-def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True):
+def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
predicted_data1 = []
predicted_data2 = []
predicted_data3 = []
size, length, dims = data.shape
- for epoch in range(0, size - batch_size + 1, batch_size):
+ for epoch in range(0, size, batch_size):
each_test_data = data[epoch:epoch + batch_size, :, :]
output1, output2, output3, _ = new_model.call(inputs=each_test_data, is_first_time=True)
- predicted_data1.append(output1)
- predicted_data2.append(output2)
- predicted_data3.append(output3)
+ if epoch == 0:
+ predicted_data1 = output1
+ predicted_data2 = output2
+ predicted_data3 = output3
+ else:
+ predicted_data1 = np.concatenate([predicted_data1, output1], axis=0)
+ predicted_data2 = np.concatenate([predicted_data2, output2], axis=0)
+ predicted_data3 = np.concatenate([predicted_data3, output3], axis=0)
predicted_data1 = np.reshape(predicted_data1, [-1, 10])
predicted_data2 = np.reshape(predicted_data2, [-1, 10])
predicted_data3 = np.reshape(predicted_data3, [-1, 10])
- temp = np.abs(predicted_data1 - label)
- temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predicted_data1.shape))
- temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predicted_data1.shape)
- temp3 = temp1 / temp2
- mse = np.sum((temp1 / temp2) ** 2, axis=1)
- print("z:", mse)
- print(mse.shape)
+ predict_data = 0
- # mse=np.mean((predicted_data-label)**2,axis=1)
- print("mse", mse)
- if isStandard:
- dims, = mse.shape
- mean = np.mean(mse)
- std = np.sqrt(np.var(mse))
- max = mean + 3 * std
- print("max:", max)
- # min = mean-3*std
- max = np.broadcast_to(max, shape=[dims, ])
- # min = np.broadcast_to(min,shape=[dims,])
- mean = np.broadcast_to(mean, shape=[dims, ])
- if isPlot:
- plt.figure(random.randint(1, 9))
- plt.plot(max)
- plt.plot(mse)
- plt.plot(mean)
- # plt.plot(min)
- plt.show()
- else:
- if isPlot:
- plt.figure(random.randint(1, 9))
- plt.plot(mse)
- # plt.plot(min)
- plt.show()
- return mse
+ predict_data = predicted_data1
+ mseList=[]
+ meanList=[]
+ maxList=[]
- return mse, mean, max
+ for i in range(1,4):
+ print("i:",i)
+ if i == 1:
+ predict_data = predicted_data1
+ elif i == 2:
+ predict_data = predicted_data2
+ elif i == 3:
+ predict_data = predicted_data3
+ temp = np.abs(predict_data - label)
+ temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predict_data.shape))
+ temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predict_data.shape)
+ temp3 = temp1 / temp2
+ mse = np.sum((temp1 / temp2) ** 2, axis=1)
+
+ print("mse.shape:",mse.shape)
+ # mse=np.mean((predicted_data-label)**2,axis=1)
+ # print("mse", mse)
+ mseList.append(mse)
+ if isStandard:
+ dims, = mse.shape
+ mean = np.mean(mse)
+ std = np.sqrt(np.var(mse))
+ max = mean + 3 * std
+ print("max.shape:", max.shape)
+ # min = mean-3*std
+ max = np.broadcast_to(max, shape=[dims, ])
+ # min = np.broadcast_to(min,shape=[dims,])
+ mean = np.broadcast_to(mean, shape=[dims, ])
+ if isPlot:
+ plt.figure(random.randint(1, 9))
+ plt.plot(max)
+ plt.plot(mse)
+ plt.plot(mean)
+ # plt.plot(min)
+ plt.show()
+ maxList.append(max)
+ meanList.append(mean)
+ else:
+ if isPlot:
+ plt.figure(random.randint(1, 9))
+ plt.plot(mse)
+ # plt.plot(min)
+ plt.show()
+
+
+ return mseList, meanList, maxList
# pass
# healthy_data is the healthy data used to determine the threshold; all_data is the complete data used to produce the model results
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
- isSave: bool = False):
+ isSave: bool = False, predictI: int = 1):
# TODO compute the MSE statistic to determine the threshold
- mse, mean, max = get_MSE(healthy_data, healthy_label, model)
+ mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
+ mse1List,_,_ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
- # 误报率的计算
- total, = mse.shape
- faultNum = 0
- faultList = []
- for i in range(total):
- if (mse[i] > max[i]):
- faultNum += 1
- faultList.append(mse[i])
+ for mse,mean,max,mse1 in zip(mseList, meanList, maxList,mse1List):
- fault_rate = faultNum / total
- print("误报率:", fault_rate)
+ # false-alarm rate: healthy samples whose statistic exceeds the threshold
+ total, = mse.shape
+ faultNum = 0
+ faultList = []
+ for i in range(total):
+ if (mse[i] > max[i]):
+ faultNum += 1
+ faultList.append(mse[i])
- # 漏报率计算
- missNum = 0
- missList = []
- mse1 = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
- all, = mse1.shape
- for i in range(all):
- if (mse1[i] < max[0]):
- missNum += 1
- missList.append(mse1[i])
+ fault_rate = faultNum / total
+ print("误报率:", fault_rate)
- miss_rate = missNum / all
- print("漏报率:", miss_rate)
+ # missed-detection rate: unhealthy samples whose statistic stays below the threshold
+ missNum = 0
+ missList = []
+ all, = mse1.shape
+ for i in range(all):
+ if (mse1[i] < max[0]):
+ missNum += 1
+ missList.append(mse1[i])
+
+ miss_rate = missNum / all
+ print("漏报率:", miss_rate)
+
+ # overall plot
+ print("mse:",mse)
+ print("mse1:",mse1)
+ print("============================================")
+ total_mse = np.concatenate([mse, mse1], axis=0)
+ total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
+ # min = np.broadcast_to(min,shape=[dims,])
+ total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
+
+ plt.figure(random.randint(1, 9))
+ plt.plot(total_max)
+ plt.plot(total_mse)
+ plt.plot(total_mean)
+ # plt.plot(min)
+ plt.show()
pass
@@ -593,6 +633,14 @@ if __name__ == '__main__':
all_data, _, _ = get_training_data_overlapping(
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
+ ## quick single test of the result generation
+ # getResult(step_one_model,
+ # healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
+ # :],
+ # healthy_label=train_label1_healthy[
+ # healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
+ # unhealthy_data=train_data_unhealthy[:200,:], unhealthy_label=train_label1_unhealthy[:200,:])
+
getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy)
diff --git a/TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/Light_channelAttention.py b/TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/Light_channelAttention.py
new file mode 100644
index 0000000..233ae1e
--- /dev/null
+++ b/TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/Light_channelAttention.py
@@ -0,0 +1,122 @@
+# _*_ coding: UTF-8 _*_
+
+
+'''
+@Author : dingjiawen
+@Date : 2022/7/12 17:48
+@Usage : Senior fellow student Huang Honghai's LCAU (the LightChannelAttention layer below)
+@Desc :
+'''
+
+import tensorflow as tf
+import tensorflow.keras
+from tensorflow.keras import *
+import tensorflow.keras.layers as layers
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
+
+import keras.backend as K
+
+
+class Between_0_1(tf.keras.constraints.Constraint):
+ def __call__(self, w):
+ # call the parent class __init__() method
+ super(Between_0_1, self).__init__()
+ return K.clip(w, 0, 1)
+
+
+class LightChannelAttention(layers.Layer):
+
+ def __init__(self):
+        # Call the parent class __init__()
+ super(LightChannelAttention, self).__init__()
+ self.DWC = DepthwiseConv1D(kernel_size=1, padding='SAME')
+ # self.DWC = DepthwiseConv1D(kernel_size=1, padding='causal',dilation_rate=4,data_format='channels_last')
+
+ def build(self, input_shape):
+ if len(input_shape) != 3:
+            raise ValueError('Inputs to `LightChannelAttention` should have rank 3. '
+ 'Received input shape:', str(input_shape))
+
+ print(input_shape)
+ # GAP
+ self.GAP = tf.keras.layers.GlobalAvgPool1D()
+ self.c1 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME')
+ # s1 = tf.nn.sigmoid(c1)
+
+ # GMP
+ self.GMP = tf.keras.layers.GlobalMaxPool1D()
+ self.c2 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME')
+ # s2 = tf.nn.sigmoid(c2)
+
+
+
+ def call(self, inputs, **kwargs):
+ batch_size, length, channel = inputs.shape
+ DWC1 = self.DWC(inputs)
+
+ # GAP
+ GAP = self.GAP(DWC1)
+ GAP = tf.expand_dims(GAP, axis=1)
+ c1 = self.c1(GAP)
+ c1 = tf.keras.layers.BatchNormalization()(c1)
+ s1 = tf.nn.sigmoid(c1)
+
+ # # GMP
+ # GMP = self.GMP(DWC1)
+ # GMP = tf.expand_dims(GMP, axis=1)
+ # c2 = self.c2(GMP)
+ # c2 = tf.keras.layers.BatchNormalization()(c2)
+ # s2 = tf.nn.sigmoid(c2)
+
+ # print(self.weight_kernel)
+
+ # weight_kernel = tf.broadcast_to(self.weight_kernel, shape=[length, channel])
+ # weight_kernel = tf.broadcast_to(weight_kernel, shape=[batch_size, length, channel])
+ s1 = tf.broadcast_to(s1, shape=[batch_size, length, channel])
+ # s2 = tf.broadcast_to(s2, shape=[batch_size, length, channel])
+
+
+ return s1
+
+
+class DynamicPooling(layers.Layer):
+
+ def __init__(self, pool_size=2):
+        # Call the parent class __init__()
+ super(DynamicPooling, self).__init__()
+ self.pool_size = pool_size
+ pass
+
+ def build(self, input_shape):
+ if len(input_shape) != 3:
+            raise ValueError('Inputs to `DynamicPooling` should have rank 3. '
+ 'Received input shape:', str(input_shape))
+ # GAP
+ self.AP = tf.keras.layers.AveragePooling1D(pool_size=self.pool_size)
+
+ # GMP
+ self.MP = tf.keras.layers.MaxPool1D(pool_size=self.pool_size)
+
+ # weight
+ self.weight_kernel = self.add_weight(
+ shape=(int(input_shape[1] / self.pool_size), input_shape[2]),
+ initializer='glorot_uniform',
+ name='weight_kernel',
+ constraint=Between_0_1())
+
+ def call(self, inputs, **kwargs):
+ batch_size, length, channel = inputs.shape
+
+ # GAP
+ GAP = self.AP(inputs)
+
+ # GMP
+ GMP = self.MP(inputs)
+
+ weight_kernel = tf.broadcast_to(self.weight_kernel, shape=GMP.shape)
+
+ output = tf.add(weight_kernel * GAP, (tf.ones_like(weight_kernel) - weight_kernel) * GMP)
+ return output
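+
+
+# Minimal smoke-test sketch (not part of the original file), assuming the
+# repo-local DepthwiseConv1D imported above is available. LightChannelAttention
+# returns the sigmoid attention map s1 broadcast to the input shape (it does not
+# multiply it into the input itself); multiplying it in is one possible way to
+# apply it. DynamicPooling blends average and max pooling with its learned,
+# [0, 1]-constrained weight.
+if __name__ == "__main__":
+    x = tf.random.normal(shape=(2, 100, 8))      # (batch, length, channels)
+
+    att = LightChannelAttention()(x)             # attention map, same shape as x
+    attended = x * att                           # re-weighted features
+
+    pooled = DynamicPooling(pool_size=2)(x)      # halves the length: (2, 50, 8)
+
+    print(attended.shape, pooled.shape)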
diff --git a/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet_L.py b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet_L.py
new file mode 100644
index 0000000..1a4ccd4
--- /dev/null
+++ b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet_L.py
@@ -0,0 +1,447 @@
+# _*_ coding: UTF-8 _*_
+
+
+'''
+@Author : dingjiawen
+@Date : 2022/7/14 9:40
+@Usage  : Joint monitoring model
+@Desc  :  RNet with the DCAU removed
+'''
+
+import tensorflow as tf
+import tensorflow.keras as keras
+from tensorflow.keras import *
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
+from model.Dynamic_channelAttention.Light_channelAttention import LightChannelAttention, DynamicPooling
+from condition_monitoring.data_deal import loadData
+from model.LossFunction.smooth_L1_Loss import SmoothL1Loss
+
+
+class Joint_Monitoring(keras.Model):
+
+ def __init__(self, conv_filter=20):
+        # Call the parent class __init__()
+ super(Joint_Monitoring, self).__init__()
+ # step one
+ self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
+ self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
+ self.upsample1 = tf.keras.layers.UpSampling1D(size=2)
+
+ self.DACU2 = LightChannelAttention()
+ self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
+ self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
+ self.upsample2 = tf.keras.layers.UpSampling1D(size=2)
+
+ self.DACU3 = LightChannelAttention()
+ self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
+ self.p1 = DynamicPooling(pool_size=2)
+ self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')
+
+ self.DACU4 = LightChannelAttention()
+ self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
+ self.p2 = DynamicPooling(pool_size=4)
+ self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')
+
+ self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
+ self.p3 = DynamicPooling(pool_size=2)
+
+ # step two
+        # Reconstruct the original data
+ self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
+ self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
+ self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
+
+ self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
+ self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
+ self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
+
+ self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
+ self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
+ self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
+
+        # step three
+        # Classifier head. NOTE: d4, d5 and output4 are used by call() and
+        # get_loss() below but were never defined in this file; the sizes and
+        # activations here are assumptions so that the model can actually run.
+        # output4 emits a single logit, matching from_logits=True in get_loss().
+        self.d4 = tf.keras.layers.Dense(100, activation=tf.nn.leaky_relu)
+        self.d5 = tf.keras.layers.Dense(30, activation=tf.nn.leaky_relu)
+        self.output4 = tf.keras.layers.Dense(1)
+
+        # loss
+        self.train_loss = []
+
+ def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
+ # step one
+ RepDCBlock1 = self.RepDCBlock1(inputs)
+ RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
+ conv1 = self.conv1(RepDCBlock1)
+ conv1 = tf.nn.leaky_relu(conv1)
+ conv1 = tf.keras.layers.BatchNormalization()(conv1)
+ upsample1 = self.upsample1(conv1)
+
+ DACU2 = self.DACU2(upsample1)
+ DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
+ RepDCBlock2 = self.RepDCBlock2(DACU2)
+ RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
+ conv2 = self.conv2(RepDCBlock2)
+ conv2 = tf.nn.leaky_relu(conv2)
+ conv2 = tf.keras.layers.BatchNormalization()(conv2)
+ upsample2 = self.upsample2(conv2)
+
+ DACU3 = self.DACU3(upsample2)
+ DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
+ RepDCBlock3 = self.RepDCBlock3(DACU3)
+ RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
+ conv3 = self.conv3(RepDCBlock3)
+ conv3 = tf.nn.leaky_relu(conv3)
+ conv3 = tf.keras.layers.BatchNormalization()(conv3)
+
+ concat1 = tf.concat([conv2, conv3], axis=1)
+
+ DACU4 = self.DACU4(concat1)
+ DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
+ RepDCBlock4 = self.RepDCBlock4(DACU4)
+ RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
+ conv4 = self.conv4(RepDCBlock4)
+ conv4 = tf.nn.leaky_relu(conv4)
+ conv4 = tf.keras.layers.BatchNormalization()(conv4)
+
+ concat2 = tf.concat([conv1, conv4], axis=1)
+
+ RepDCBlock5 = self.RepDCBlock5(concat2)
+ RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
+
+ output1 = []
+ output2 = []
+ output3 = []
+ output4 = []
+
+ if is_first_time:
+ # step two
+            # Reconstruct the original data
+            # Attached to block3
+ GRU1 = self.GRU1(RepDCBlock3)
+ GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
+ d1 = self.d1(GRU1)
+ # tf.nn.softmax
+ output1 = self.output1(d1)
+            # Attached to block4
+ GRU2 = self.GRU2(RepDCBlock4)
+ GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
+ d2 = self.d2(GRU2)
+ # tf.nn.softmax
+ output2 = self.output2(d2)
+            # Attached to block5
+ GRU3 = self.GRU3(RepDCBlock5)
+ GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
+ d3 = self.d3(GRU3)
+ # tf.nn.softmax
+ output3 = self.output3(d3)
+ else:
+ GRU1 = self.GRU1(RepDCBlock3)
+ GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
+ d1 = self.d1(GRU1)
+ # tf.nn.softmax
+ output1 = self.output1(d1)
+            # Attached to block4
+ GRU2 = self.GRU2(RepDCBlock4)
+ GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
+ d2 = self.d2(GRU2)
+ # tf.nn.softmax
+ output2 = self.output2(d2)
+            # Attached to block5
+ GRU3 = self.GRU3(RepDCBlock5)
+ GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
+ d3 = self.d3(GRU3)
+ # tf.nn.softmax
+ output3 = self.output3(d3)
+
+        # Multi-scale dynamic pooling
+ # p1 = self.p1(output1)
+ # B, _, _ = p1.shape
+ # f1 = tf.reshape(p1, shape=[B, -1])
+ # p2 = self.p2(output2)
+ # f2 = tf.reshape(p2, shape=[B, -1])
+ # p3 = self.p3(output3)
+ # f3 = tf.reshape(p3, shape=[B, -1])
+ # step three
+        # Classifier
+ concat3 = tf.concat([output1, output2, output3], axis=1)
+ # dropout = tf.keras.layers.Dropout(0.25)(concat3)
+ d4 = self.d4(concat3)
+ d5 = self.d5(d4)
+ # d4 = tf.keras.layers.BatchNormalization()(d4)
+ output4 = self.output4(d5)
+
+ return output1, output2, output3, output4
+
+ def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
+ pred_5=None):
+ # step one
+ RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
+ RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
+ conv1 = self.conv1(RepDCBlock1)
+ conv1 = tf.nn.leaky_relu(conv1)
+ conv1 = tf.keras.layers.BatchNormalization()(conv1)
+ upsample1 = self.upsample1(conv1)
+
+ DACU2 = self.DACU2(upsample1)
+ DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
+ RepDCBlock2 = self.RepDCBlock2(DACU2)
+ RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
+ conv2 = self.conv2(RepDCBlock2)
+ conv2 = tf.nn.leaky_relu(conv2)
+ conv2 = tf.keras.layers.BatchNormalization()(conv2)
+ upsample2 = self.upsample2(conv2)
+
+ DACU3 = self.DACU3(upsample2)
+ DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
+ RepDCBlock3 = self.RepDCBlock3(DACU3)
+ RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
+ conv3 = self.conv3(RepDCBlock3)
+ conv3 = tf.nn.leaky_relu(conv3)
+ conv3 = tf.keras.layers.BatchNormalization()(conv3)
+
+ concat1 = tf.concat([conv2, conv3], axis=1)
+
+ DACU4 = self.DACU4(concat1)
+ DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
+ RepDCBlock4 = self.RepDCBlock4(DACU4)
+ RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
+ conv4 = self.conv4(RepDCBlock4)
+ conv4 = tf.nn.leaky_relu(conv4)
+ conv4 = tf.keras.layers.BatchNormalization()(conv4)
+
+ concat2 = tf.concat([conv1, conv4], axis=1)
+
+ RepDCBlock5 = self.RepDCBlock5(concat2)
+ RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
+
+ if is_first_time:
+ # step two
+            # Reconstruct the original data
+            # Attached to block3
+ GRU1 = self.GRU1(RepDCBlock3)
+ GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
+ d1 = self.d1(GRU1)
+ # tf.nn.softmax
+ output1 = self.output1(d1)
+            # Attached to block4
+ GRU2 = self.GRU2(RepDCBlock4)
+ GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
+ d2 = self.d2(GRU2)
+ # tf.nn.softmax
+ output2 = self.output2(d2)
+            # Attached to block5
+ GRU3 = self.GRU3(RepDCBlock5)
+ GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
+ d3 = self.d3(GRU3)
+ # tf.nn.softmax
+ output3 = self.output3(d3)
+
+            # reduce_mean: collapse the dimensions and compute the mean
+ MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
+ MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
+ MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
+ # MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
+ # MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
+ # MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
+
+ print("MSE_loss1:", MSE_loss1.numpy())
+ print("MSE_loss2:", MSE_loss2.numpy())
+ print("MSE_loss3:", MSE_loss3.numpy())
+ loss = MSE_loss1 + MSE_loss2 + MSE_loss3
+ Accuracy_num = 0
+
+ else:
+ # step two
+            # Reconstruct the original data
+            # Attached to block3
+ GRU1 = self.GRU1(RepDCBlock3)
+ GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
+ d1 = self.d1(GRU1)
+ # tf.nn.softmax
+ output1 = self.output1(d1)
+            # Attached to block4
+ GRU2 = self.GRU2(RepDCBlock4)
+ GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
+ d2 = self.d2(GRU2)
+ # tf.nn.softmax
+ output2 = self.output2(d2)
+            # Attached to block5
+ GRU3 = self.GRU3(RepDCBlock5)
+ GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
+ d3 = self.d3(GRU3)
+ # tf.nn.softmax
+ output3 = self.output3(d3)
+
+        # Multi-scale dynamic pooling
+ # p1 = self.p1(output1)
+ # B, _, _ = p1.shape
+ # f1 = tf.reshape(p1, shape=[B, -1])
+ # p2 = self.p2(output2)
+ # f2 = tf.reshape(p2, shape=[B, -1])
+ # p3 = self.p3(output3)
+ # f3 = tf.reshape(p3, shape=[B, -1])
+ # step three
+        # Classifier
+ concat3 = tf.concat([output1, output2, output3], axis=1)
+ # dropout = tf.keras.layers.Dropout(0.25)(concat3)
+ d4 = self.d4(concat3)
+ d5 = self.d5(d4)
+ # d4 = tf.keras.layers.BatchNormalization()(d4)
+ output4 = self.output4(d5)
+
+            # reduce_mean: collapse the dimensions and compute the mean
+ MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
+ MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
+ MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
+ Cross_Entropy_loss = tf.reduce_mean(
+ tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))
+
+ print("MSE_loss:", MSE_loss.numpy())
+ print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
+ Accuracy_num = self.get_Accuracy(label=label2, output=output4)
+ loss = MSE_loss + Cross_Entropy_loss
+ return loss, Accuracy_num
+
+ def get_Accuracy(self, output, label):
+
+        # output4 is defined above (as an assumption) to emit a single logit,
+        # so map it through a sigmoid before rounding to a 0/1 prediction
+        predict_label = tf.round(tf.nn.sigmoid(output))
+ label = tf.cast(label, dtype=tf.float32)
+
+ t = np.array(label - predict_label)
+
+ b = t[t[:] == 0]
+
+ return b.__len__()
+
+ def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
+ pred_5=None):
+ with tf.GradientTape() as tape:
+            # TODO: by default the tape only watches variables created with tf.Variable(trainable=True)
+ # tape.watch(self.variables)
+ L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
+ pred_3=pred_3,
+ pred_4=pred_4, pred_5=pred_5)
+            # Save the loss so it can be reported
+ self.train_loss = L
+ g = tape.gradient(L, self.variables)
+ return g, Accuracy_num
+
+ def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
+ pred_4=None, pred_5=None):
+ g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
+ pred_3=pred_3,
+ pred_4=pred_4, pred_5=pred_5)
+ optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
+ return self.train_loss, Accuracy_num
+
+    # For now only batch_size equal to 1 is really supported; otherwise passing z around is awkward
+ def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
+ step_one_model=None):
+ val_loss = []
+ accuracy_num = 0
+ output1 = 0
+ output2 = 0
+ output3 = 0
+ z = 1
+ size, length, dims = val_data.shape
+ if batch_size == None:
+ batch_size = self.batch_size
+ for epoch in range(0, size - batch_size, batch_size):
+ each_val_data = val_data[epoch:epoch + batch_size, :, :]
+ each_val_label1 = val_label1[epoch:epoch + batch_size, :]
+ each_val_label2 = val_label2[epoch:epoch + batch_size, ]
+ # each_val_data = tf.expand_dims(each_val_data, axis=0)
+ # each_val_query = tf.expand_dims(each_val_query, axis=0)
+ # each_val_label = tf.expand_dims(each_val_label, axis=0)
+ if not is_first_time:
+ output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)
+
+ each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
+ is_first_time=is_first_time,
+ pred_3=output1, pred_4=output2, pred_5=output3)
+ accuracy_num += each_accuracy_num
+ val_loss.append(each_loss)
+ z += 1
+
+ val_accuracy = accuracy_num / ((z-1) * batch_size)
+ val_total_loss = tf.reduce_mean(val_loss)
+ return val_total_loss, val_accuracy
+
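+# Usage note (a sketch, not in the original file): training happens in two
+# stages. In stage one, train(...) is called with is_first_time=True, so the
+# three GRU heads are fitted to reconstruct label1 and the loss is the sum of
+# three SmoothL1 terms. In stage two, an already-trained stage-one model
+# supplies pred_3/pred_4/pred_5 as reconstruction targets, train(...) is called
+# with is_first_time=False, and the classifier output4 is fitted with binary
+# cross-entropy against label2, e.g.:
+#   o1, o2, o3, _ = step_one_model.call(inputs=batch_x, is_first_time=True)
+#   loss, acc_num = model.train(batch_x, label2=batch_y, is_first_time=False,
+#                               pred_3=o1, pred_4=o2, pred_5=o3)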
+
+class RevConv(keras.layers.Layer):
+
+ def __init__(self, kernel_size=3):
+        # Call the parent class __init__()
+ super(RevConv, self).__init__()
+ self.kernel_size = kernel_size
+
+ def get_config(self):
+        # Attributes of this custom layer (returned for serialization)
+ config = (
+ {
+ 'kernel_size': self.kernel_size
+ }
+ )
+ base_config = super(RevConv, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
+
+ def build(self, input_shape):
+ # print(input_shape)
+ _, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
+ self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
+ padding='causal',
+ dilation_rate=4)
+
+ self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
+ dilation_rate=4)
+ # self.b2 = tf.keras.layers.BatchNormalization()
+
+ # self.b3 = tf.keras.layers.BatchNormalization()
+
+ # out = tf.keras.layers.Add()([b1, b2, b3])
+ # out = tf.nn.relu(out)
+
+ def call(self, inputs, **kwargs):
+ conv1 = self.conv1(inputs)
+ b1 = tf.keras.layers.BatchNormalization()(conv1)
+ b1 = tf.nn.leaky_relu(b1)
+ # b1 = self.b1
+
+ conv2 = self.conv2(inputs)
+ b2 = tf.keras.layers.BatchNormalization()(conv2)
+ b2 = tf.nn.leaky_relu(b2)
+
+ b3 = tf.keras.layers.BatchNormalization()(inputs)
+
+ out = tf.keras.layers.Add()([b1, b2, b3])
+ out = tf.nn.relu(out)
+
+ return out
+
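+# Design note (not in the original): RevConv is a RepVGG-style three-branch
+# residual unit: a dilated causal convolution with the configured kernel_size,
+# a 1x1 causal convolution, and an identity path, each batch-normalized, then
+# summed and passed through a ReLU. Because every branch keeps filters equal to
+# the input channel count, RevConvBlock below can stack `num` of these units
+# without changing the feature dimension.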
+
+class RevConvBlock(keras.layers.Layer):
+
+ def __init__(self, num: int = 3, kernel_size=3):
+        # Call the parent class __init__()
+ super(RevConvBlock, self).__init__()
+ self.num = num
+ self.kernel_size = kernel_size
+ self.L = []
+ for i in range(num):
+ RepVGG = RevConv(kernel_size=kernel_size)
+ self.L.append(RepVGG)
+
+ def get_config(self):
+        # Attributes of this custom layer (returned for serialization)
+ config = (
+ {
+ 'kernel_size': self.kernel_size,
+ 'num': self.num
+ }
+ )
+ base_config = super(RevConvBlock, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
+
+ def call(self, inputs, **kwargs):
+ for i in range(self.num):
+ inputs = self.L[i](inputs)
+ return inputs
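+
+
+# Minimal forward-pass smoke test (a sketch, not part of the original file),
+# assuming the repo-local imports above resolve and an even sequence length so
+# that the stride-2 convolutions and the axis-1 concatenations line up.
+if __name__ == '__main__':
+    model = Joint_Monitoring(conv_filter=20)
+    dummy = tf.random.normal(shape=(4, 200, 10))  # (batch, length, channels)
+    out1, out2, out3, out4 = model(dummy)         # is_first_time defaults to True
+    # expected: (4, 10) for out1..out3 and, with the assumed classifier head, (4, 1)
+    print(out1.shape, out2.shape, out3.shape, out4.shape)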