leecode update
This commit is contained in:
parent f888fdcc78
commit 8d8994786c

@ -0,0 +1,117 @@
package com.markilue.leecode.backtrace;

import org.junit.Test;

import java.util.*;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.backtrace
 * @Author: markilue
 * @CreateTime: 2022-10-13 11:08
 * @Description: TODO LeetCode 17, Letter Combinations of a Phone Number:
 * Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent. The answer may be returned in any order.
 * The mapping of digits to letters is the same as on the telephone buttons; note that 1 does not map to any letters.
 * @Version: 1.0
 */
public class IetterCombinations {

    @Test
    public void test() {
        System.out.println(letterCombinations(""));
    }

    List<List<String>> dict = new ArrayList<List<String>>();

    // build the digit-to-letters dictionary (index 0 holds the letters of '2')
    public void construct() {
        dict.add(new ArrayList<String>(Arrays.asList("a", "b", "c")));// 2
        dict.add(new ArrayList<String>(Arrays.asList("d", "e", "f")));// 3
        dict.add(new ArrayList<String>(Arrays.asList("g", "h", "i")));// 4
        dict.add(new ArrayList<String>(Arrays.asList("j", "k", "l")));// 5
        dict.add(new ArrayList<String>(Arrays.asList("m", "n", "o")));// 6
        dict.add(new ArrayList<String>(Arrays.asList("p", "q", "r", "s")));// 7
        dict.add(new ArrayList<String>(Arrays.asList("t", "u", "v")));// 8
        dict.add(new ArrayList<String>(Arrays.asList("w", "x", "y", "z")));// 9
    }

    List<String> result = new ArrayList<>();
    StringBuilder cur = new StringBuilder();

    /**
     * Backtracking: beats 48.76% on runtime, 15.77% on memory.
     * @param digits
     * @return
     */
    public List<String> letterCombinations(String digits) {
        if (digits.length() == 0) {
            return result;
        }
        construct();
        backtracking(digits.length(), digits, 0);
        return result;
    }

    public void backtracking(int n, String digits, int val) {
        if (cur.length() == n) {
            result.add(cur.toString());
            return;
        }

        // Integer.valueOf(digits.charAt(val)) would give the ASCII code (50 for '2'),
        // not the digit itself, so index the dictionary relative to '2' instead.
        for (String s : dict.get(digits.charAt(val) - '2')) {
            cur.append(s);
            backtracking(n, digits, val + 1);
            cur.deleteCharAt(cur.length() - 1);
        }
    }

    /**
     * Official backtracking solution: records the mapping in a Map, which avoids
     * the chain of type conversions above.
     * @param digits
     * @return
     */
    public List<String> letterCombinations1(String digits) {
        List<String> combinations = new ArrayList<String>();
        if (digits.length() == 0) {
            return combinations;
        }
        Map<Character, String> phoneMap = new HashMap<Character, String>() {{
            put('2', "abc");
            put('3', "def");
            put('4', "ghi");
            put('5', "jkl");
            put('6', "mno");
            put('7', "pqrs");
            put('8', "tuv");
            put('9', "wxyz");
        }};
        backtrack(combinations, phoneMap, digits, 0, new StringBuffer());
        return combinations;
    }

    public void backtrack(List<String> combinations, Map<Character, String> phoneMap, String digits, int index, StringBuffer combination) {
        if (index == digits.length()) {
            combinations.add(combination.toString());
        } else {
            char digit = digits.charAt(index);
            String letters = phoneMap.get(digit);
            int lettersCount = letters.length();
            for (int i = 0; i < lettersCount; i++) {
                combination.append(letters.charAt(i));
                backtrack(combinations, phoneMap, digits, index + 1, combination);
                combination.deleteCharAt(index);
            }
        }
    }
}
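A quick sanity check of the solution above (a hypothetical standalone driver, not part of this commit), using LeetCode's own example for digits "23":

// Hypothetical usage sketch — assumes IetterCombinations is on the classpath.
public class LetterCombinationsDemo {
    public static void main(String[] args) {
        IetterCombinations solver = new IetterCombinations();
        // Expected output (LeetCode 17 example, any order):
        // [ad, ae, af, bd, be, bf, cd, ce, cf]
        System.out.println(solver.letterCombinations("23"));
    }
}
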
@ -0,0 +1,137 @@
package com.markilue.leecode.backtrace;

import org.junit.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.backtrace
 * @Author: markilue
 * @CreateTime: 2022-10-13 11:59
 * @Description: TODO LeetCode 39, Combination Sum:
 * Given an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target. The combinations may be returned in any order.
 * The same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the count of at least one chosen number differs.
 * For the given input, it is guaranteed that fewer than 150 unique combinations sum to target.
 * @Version: 1.0
 */
public class combinationSum {

    @Test
    public void test() {
        int[] candidates = {2, 3, 6, 7};
        int target = 7;
        System.out.println(combinationSum(candidates, target));
    }

    @Test
    public void test1() {
        int[] candidates = {2, 3, 5};
        int target = 8;
        System.out.println(combinationSum(candidates, target));
    }

    /**
     * Backtracking.
     * Beats 93.59% on runtime, 78.22% on memory.
     *
     * @param candidates
     * @param target
     * @return
     */
    public List<List<Integer>> combinationSum(int[] candidates, int target) {
        backtracking(candidates, target, 0);
        return result;
    }

    List<List<Integer>> result = new ArrayList<>();
    List<Integer> cur = new ArrayList<>();
    int sum = 0;

    /**
     * @param candidates
     * @param target
     * @param val position in candidates currently being considered
     */
    public void backtracking(int[] candidates, int target, int val) {

        if (sum > target) {
            return;
        }

        if (sum == target) {
            ArrayList<Integer> list = new ArrayList<>(cur);
            result.add(list);
            return;
        }

        if (val >= candidates.length) {
            return;
        }

        // Bound the loop to prune the search:
        // i is how many copies of candidates[val] get added
        for (int i = 0; i <= (target - sum) / candidates[val]; i++) {
            for (int j = 0; j < i; j++) {
                cur.add(candidates[val]);
            }
            sum += candidates[val] * i;
            backtracking(candidates, target, val + 1);
            for (int j = 0; j < i; j++) {
                cur.remove(cur.size() - 1);
            }
            sum -= candidates[val] * i;
        }
    }

    /**
     * Backtracking after 代码随想录 (Code Capriccio).
     * Beats 100% on runtime, 73.19% on memory.
     *
     * @param candidates
     * @param target
     * @return
     */
    public List<List<Integer>> combinationSum1(int[] candidates, int target) {
        Arrays.sort(candidates);
        backtracking1(candidates, target, 0);
        return result;
    }

    /**
     * Official backtracking.
     *
     * @param candidates
     * @param target
     * @param startIndex position in candidates currently being considered
     */
    public void backtracking1(int[] candidates, int target, int startIndex) {

        if (sum > target) {
            return;
        }

        if (sum == target) {
            ArrayList<Integer> list = new ArrayList<>(cur);
            result.add(list);
            return;
        }

        // Prune: candidates is sorted, so stop once the next pick would overshoot.
        // (The bound must be i < candidates.length; i <= candidates.length would
        // read past the end of the array.)
        for (int i = startIndex; i < candidates.length && sum + candidates[i] <= target; i++) {
            sum += candidates[i];
            cur.add(candidates[i]);
            // pass i rather than i + 1, so the current number may be reused
            backtracking1(candidates, target, i);
            sum -= candidates[i];
            cur.remove(cur.size() - 1);
        }
    }
}
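As a worked example (a hypothetical driver, not part of this commit): for the LeetCode 39 sample input candidates = [2, 3, 6, 7] and target = 7, the solver should return [[2, 2, 3], [7]] in some order, since 2 + 2 + 3 = 7 and 7 is itself a candidate.

// Hypothetical usage sketch — assumes combinationSum is on the classpath.
public class CombinationSumDemo {
    public static void main(String[] args) {
        combinationSum solver = new combinationSum();
        // Expected (any order): [[2, 2, 3], [7]]
        System.out.println(solver.combinationSum(new int[]{2, 3, 6, 7}, 7));
    }
}
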
@ -0,0 +1,123 @@
package com.markilue.leecode.backtrace;

import org.junit.Test;

import java.util.ArrayList;
import java.util.List;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.backtrace
 * @Author: markilue
 * @CreateTime: 2022-10-13 10:21
 * @Description: TODO LeetCode 216, Combination Sum III:
 * Find all valid combinations of k numbers that sum up to n, subject to:
 * only numbers 1 through 9 are used, and
 * each number is used at most once.
 * Return a list of all possible valid combinations. The list must not contain the same combination twice; combinations may be returned in any order.
 * @Version: 1.0
 */
public class combinationSum3 {

    @Test
    public void test() {
        int k = 3, n = 7;
        System.out.println(combinationSum3(k, n));
    }

    @Test
    public void test1() {
        int k = 3, n = 9;
        System.out.println(combinationSum3(k, n));
    }

    /**
     * Backtracking: without pruning, the answer has to be found among roughly 9^k candidate paths.
     * Beats 100% on runtime, 87.1% on memory.
     * @param k
     * @param n
     * @return
     */
    public List<List<Integer>> combinationSum3(int k, int n) {
        backtracking(k, n, 1);
        return result;
    }

    List<List<Integer>> result = new ArrayList<>();
    List<Integer> cur = new ArrayList<>();
    int sum = 0;

    public void backtracking(int k, int n, int val) {

        if (cur.size() == k) {
            if (sum == n) {
                ArrayList<Integer> list = new ArrayList<>();
                list.addAll(cur);
                result.add(list);
            }
            return;
        }

        // Bound the loop to prune the search
        int threshold = 9;
        if (n - sum < 9) {
            threshold = n - sum;
        }

        for (int i = val; i <= threshold; i++) {
            cur.add(i);
            sum += i;
            backtracking(k, n, i + 1);
            cur.remove(cur.size() - 1);
            sum -= i;
        }
    }

    List<Integer> temp = new ArrayList<Integer>();
    List<List<Integer>> ans = new ArrayList<List<Integer>>();

    /**
     * Official enumeration: a 9-bit binary mask models which numbers are chosen;
     * every set bit puts its number into temp, and iterating over all subsets
     * yields every candidate combination.
     * The answer is found among the 2^9 subsets.
     * @param k
     * @param n
     * @return
     */
    public List<List<Integer>> combinationSum31(int k, int n) {
        for (int mask = 0; mask < (1 << 9); ++mask) {
            if (check(mask, k, n)) {
                ans.add(new ArrayList<Integer>(temp));
            }
        }
        return ans;
    }

    public boolean check(int mask, int k, int n) {
        temp.clear();
        for (int i = 0; i < 9; ++i) {
            if (((1 << i) & mask) != 0) {
                temp.add(i + 1);
            }
        }
        if (temp.size() != k) {
            return false;
        }
        int sum = 0;
        for (int num : temp) {
            sum += num;
        }
        return sum == n;
    }
}
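To make the mask decoding in check() concrete, here is a minimal sketch (hypothetical, not part of this commit): bit i set means the number i + 1 is chosen, so mask 0b000001011 selects {1, 2, 4}, which is exactly the valid k = 3, n = 7 combination.

// Hypothetical illustration of the subset encoding used by combinationSum31/check.
public class MaskDecodeDemo {
    public static void main(String[] args) {
        int mask = 0b000001011; // bits 0, 1 and 3 set
        for (int i = 0; i < 9; i++) {
            if (((1 << i) & mask) != 0) {
                System.out.println(i + 1); // prints 1, 2, 4 — size 3, summing to 7
            }
        }
    }
}
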
@ -37,6 +37,7 @@ from model.CommonFunction.CommonFunction import *
 from sklearn.model_selection import train_test_split
 from tensorflow.keras.models import load_model, save_model
 from keras.callbacks import EarlyStopping
+import random

 '''Hyperparameter settings'''
 time_stamp = 120
@ -196,7 +197,7 @@ def EWMA(data, K=K, namuda=namuda):
     pass


-def get_MSE(data, label, new_model):
+def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True):
     predicted_data = new_model.predict(data)

     temp = np.abs(predicted_data - label)
@ -209,24 +210,31 @@ def get_MSE(data, label, new_model):
     # mse=np.mean((predicted_data-label)**2,axis=1)
     print("mse", mse)
-    dims, = mse.shape
-    mean = np.mean(mse)
-    std = np.sqrt(np.var(mse))
-    max = mean + 3 * std
-    # min = mean-3*std
-    max = np.broadcast_to(max, shape=[dims, ])
-    # min = np.broadcast_to(min,shape=[dims,])
-    mean = np.broadcast_to(mean, shape=[dims, ])
-    # plt.plot(max)
-    # plt.plot(mse)
-    # plt.plot(mean)
-    # # plt.plot(min)
-    # plt.show()
-    #
-    #
+    if isStandard:
+        dims, = mse.shape
+        mean = np.mean(mse)
+        std = np.sqrt(np.var(mse))
+        # alarm threshold: 3-sigma rule on the healthy-data error
+        max = mean + 3 * std
+        print("max:", max)
+        # min = mean-3*std
+        max = np.broadcast_to(max, shape=[dims, ])
+        # min = np.broadcast_to(min,shape=[dims,])
+        mean = np.broadcast_to(mean, shape=[dims, ])
+        if isPlot:
+            plt.figure(random.randint(1, 9))
+            plt.plot(max)
+            plt.plot(mse)
+            plt.plot(mean)
+            # plt.plot(min)
+            plt.show()
+    else:
+        if isPlot:
+            plt.figure(random.randint(1, 9))
+            plt.plot(mse)
+            # plt.plot(min)
+            plt.show()
+        return mse

     return mse, mean, max
     # pass
@ -340,20 +348,20 @@ def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False
     return total_result


-def GRU_Model():
+def DCConv_Model():
     input = tf.keras.Input(shape=[time_stamp, feature_num])
     input = tf.cast(input, tf.float32)

-    LSTM = tf.keras.layers.Conv1D(10, 3, padding="causal",dilation_rate=2)(input)
-    LSTM = tf.keras.layers.Conv1D(20, 3, padding="causal",dilation_rate=4)(LSTM)
-    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal",dilation_rate=8)(LSTM)
-    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal",dilation_rate=16)(LSTM)
-    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal",dilation_rate=32)(LSTM)
-    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal",dilation_rate=64)(LSTM)
-    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal",dilation_rate=128)(LSTM)
+    # stacked dilated causal convolutions, dilation doubling per layer
+    LSTM = tf.keras.layers.Conv1D(10, 3, padding="causal", dilation_rate=2)(input)
+    LSTM = tf.keras.layers.Conv1D(20, 3, padding="causal", dilation_rate=4)(LSTM)
+    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal", dilation_rate=8)(LSTM)
+    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal", dilation_rate=16)(LSTM)
+    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal", dilation_rate=32)(LSTM)
+    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal", dilation_rate=64)(LSTM)
+    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal", dilation_rate=128)(LSTM)
     # LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal",dilation_rate=2)(LSTM)

+    # keep only the last time step for the dense head
+    LSTM = LSTM[:, -1, :]
     # bn = tf.keras.layers.BatchNormalization()(LSTM)

     d1 = tf.keras.layers.Dense(20)(LSTM)
@ -365,6 +373,44 @@ def GRU_Model():
     pass


+# healthy_data is the healthy data, used to fix the threshold; all_data is the full dataset, used to produce the model's results
+def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
+              isSave: bool = False):
+    # TODO compute the MSE to fix the threshold
+    mse, mean, max = get_MSE(healthy_data, healthy_label, model)
+
+    # false-alarm rate
+    total, = mse.shape
+    faultNum = 0
+    faultList = []
+    for i in range(total):
+        if mse[i] > max[i]:
+            faultNum += 1
+            faultList.append(mse[i])
+
+    fault_rate = faultNum / total
+    print("false-alarm rate:", fault_rate)
+
+    # missed-detection rate
+    missNum = 0
+    missList = []
+    mse1 = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
+    all, = mse1.shape
+
+    flag = True
+    for i in range(all):
+        if mse1[i] < max[0] and flag:
+            missNum += 1
+            missList.append(mse1[i])
+        elif mse1[i] > max[0]:
+            flag = False
+    print("all:", all)
+    miss_rate = missNum / all
+    print("missed-detection rate:", miss_rate)
+    pass
+
+
 if __name__ == '__main__':
     total_data = loadData.execute(N=feature_num, file_name=file_name)
     total_data = normalization(data=total_data)
@ -375,7 +421,7 @@ if __name__ == '__main__':
         is_Healthy=False)
     #### TODO step-one training
     # single run
-    model = GRU_Model()
+    model = DCConv_Model()

     model.compile(optimizer=tf.optimizers.Adam(0.01), loss=tf.losses.mse)
     model.summary()
@ -389,49 +435,25 @@ if __name__ == '__main__':
         mode='min',
         period=1)

-    history = model.fit(train_data_healthy[:30000, :, :], train_label1_healthy[:30000, :], epochs=20,
-                        batch_size=32, validation_split=0.2, shuffle=False, verbose=1,
-                        callbacks=[checkpoint, early_stop])
-    model.save(save_name)
+    # history = model.fit(train_data_healthy[:30000, :, :], train_label1_healthy[:30000, :], epochs=20,
+    #                     batch_size=32, validation_split=0.2, shuffle=False, verbose=1,
+    #                     callbacks=[checkpoint, early_stop])
+    # model.save(save_name)

     ## TODO testing
-    test_data, test_label = get_training_data(total_data[:300455, :])
+    # test_data, test_label = get_training_data(total_data[:healthy_date, :])
+    # newModel = tf.keras.models.load_model(save_name)
+    # mse, mean, max = get_MSE(test_data, test_label, new_model=newModel)
+
+    healthy_size, _, _ = train_data_healthy.shape
+    unhealthy_size, _, _ = train_data_unhealthy.shape
+    all_data, _, _ = get_training_data_overlapping(
+        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)

     newModel = tf.keras.models.load_model(save_name)
-    mse, mean, max = get_MSE(test_data, test_label, new_model=newModel)
-
-    test_data, test_label = get_training_data(total_data[20000:, :])
-    predicted_data = newModel.predict(test_data)
-    rows, cols = predicted_data.shape
-
-    temp = np.abs(predicted_data - test_label)
-    temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predicted_data.shape))
-    temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predicted_data.shape)
-    temp3 = temp1 / temp2
-    mse = np.sum((temp1 / temp2) ** 2, axis=1)
-
-    plt.plot(mse)
-    plt.plot(mean)
-    plt.plot(max)
-    plt.show()
-
-    data = pd.DataFrame(mse).ewm(span=3).mean()
-    print(data)
-    data = np.array(data)
-
-    index, _ = data.shape
-
-    for i in range(2396):
-        if data[i, 0] > 5:
-            data[i, 0] = data[i - 1, :]
-    print(data)
-    mean = data[2000:2396, :].mean()
-    std = data[2000:2396, :].std()
-    mean = np.broadcast_to(mean, shape=[500, ])
-    std = np.broadcast_to(std, shape=[500, ])
-    plt.plot(data[2000:2396, :])
-    plt.plot(mean)
-    plt.plot(mean + 3 * std)
-    plt.plot(mean - 3 * std)
-    plt.show()
-
+    getResult(newModel, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
+              healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
+              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy)
+    # mse, mean, max = get_MSE(train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
+    #                          train_label1_healthy[healthy_size - 2 * unhealthy_size:, :], new_model=newModel)
     pass
@ -6,5 +6,440 @@
 @Author : dingjiawen
 @Date : 2022/10/11 18:52
 @Usage : comparison experiment: prediction at the same depth as JointNet
-@Desc :
+@Desc : CNN-GRU
 '''

import tensorflow as tf
import tensorflow.keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
from condition_monitoring.data_deal import loadData
from model.Joint_Monitoring.Joint_Monitoring3 import Joint_Monitoring

from model.CommonFunction.CommonFunction import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model, save_model
from keras.callbacks import EarlyStopping
import random

'''Hyperparameter settings'''
time_stamp = 120
feature_num = 10
batch_size = 16
learning_rate = 0.001
EPOCH = 101
model_name = "DCNN-GRU"
'''EWMA hyperparameters'''
K = 18
namuda = 0.01
'''Save names'''

save_name = "./model/{0}_timestamp{1}_feature{2}.h5".format(model_name,
                                                            time_stamp,
                                                            feature_num,
                                                            batch_size,
                                                            EPOCH)
save_step_two_name = "../hard_model/two_weight/{0}_timestamp{1}_feature{2}_weight_epoch14/weight".format(model_name,
                                                                                                         time_stamp,
                                                                                                         feature_num,
                                                                                                         batch_size,
                                                                                                         EPOCH)

# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
#                                                                    time_stamp,
#                                                                    feature_num,
#                                                                    batch_size,
#                                                                    EPOCH)
# save_step_two_name = "../model/joint_two/{0}_timestamp{1}_feature{2}.h5".format(model_name,
#                                                                                 time_stamp,
#                                                                                 feature_num,
#                                                                                 batch_size,
#                                                                                 EPOCH)
'''File name'''
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"

'''
File notes: jb4q_8_delete_total_zero.csv is the file in which only the all-zero columns were removed.
Rows 0:415548 are all normal values (2019/7/30 00:00:00 - 2019/9/18 11:14:00).
Rows 415549:432153 are all abnormal values (2019/9/18 11:21:01 - 2021/1/18 00:00:00).
'''
'''File parameters'''
# last normal time point
healthy_date = 415548
# last abnormal time point
unhealthy_date = 432153
# abnormality tolerance
unhealthy_patience = 5


def remove(data, time_stamp=time_stamp):
    rows, cols = data.shape
    print("remove_data.shape:", data.shape)
    num = int(rows / time_stamp)

    return data[:num * time_stamp, :]
    pass


# non-overlapping sampling
def get_training_data(data, time_stamp: int = time_stamp):
    removed_data = remove(data=data)
    rows, cols = removed_data.shape
    print("removed_data.shape:", data.shape)
    print("removed_data:", removed_data)
    train_data = np.reshape(removed_data, [-1, time_stamp, cols])
    print("train_data:", train_data)
    batchs, time_stamp, cols = train_data.shape

    for i in range(1, batchs):
        each_label = np.expand_dims(train_data[i, 0, :], axis=0)
        if i == 1:
            train_label = each_label
        else:
            train_label = np.concatenate([train_label, each_label], axis=0)

    print("train_data.shape:", train_data.shape)
    print("train_label.shape", train_label.shape)
    return train_data[:-1, :], train_label


# overlapping sampling
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
    rows, cols = data.shape
    train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
    train_label = np.empty(shape=[rows - time_stamp - 1, cols])
    for i in range(rows):
        if i + time_stamp >= rows:
            break
        if i + time_stamp < rows - 1:
            train_data[i] = data[i:i + time_stamp]
            train_label[i] = data[i + time_stamp]

    print("After overlapping sampling:")
    print("data:", train_data)  # (300334,120,10)
    print("label:", train_label)  # (300334,10)

    if is_Healthy:
        train_label2 = np.ones(shape=[train_label.shape[0]])
    else:
        train_label2 = np.zeros(shape=[train_label.shape[0]])

    print("label2:", train_label2)

    return train_data, train_label, train_label2


# min-max normalization
def normalization(data):
    rows, cols = data.shape
    print("Before normalization:", data)
    print(data.shape)
    print("======================")

    max = np.max(data, axis=0)
    max = np.broadcast_to(max, [rows, cols])
    min = np.min(data, axis=0)
    min = np.broadcast_to(min, [rows, cols])

    data = (data - min) / (max - min)
    print("After normalization:", data)
    print(data.shape)

    return data


# z-score standardization
def Regularization(data):
    rows, cols = data.shape
    print("Before standardization:", data)
    print(data.shape)
    print("======================")

    mean = np.mean(data, axis=0)
    mean = np.broadcast_to(mean, shape=[rows, cols])
    dst = np.sqrt(np.var(data, axis=0))
    dst = np.broadcast_to(dst, shape=[rows, cols])
    data = (data - mean) / dst
    print("After standardization:", data)
    print(data.shape)

    return data
    pass


def EWMA(data, K=K, namuda=namuda):
    # what t should be is unknown for now
    t = 0
    mid = np.mean(data, axis=0)
    standard = np.sqrt(np.var(data, axis=0))
    UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
    return mid, UCL, LCL
    pass
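# Note on the limits above: the textbook EWMA control chart uses
#     UCL, LCL = mid ± K * sigma * sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** (2 * t)))
# The code writes (1 - namuda) ** 2 * t rather than ** (2 * t); with t fixed at 0 the bracket
# evaluates to 1, so the returned UCL/LCL are the asymptotic (large-t) limits
#     mid ± K * sigma * sqrt(namuda / (2 - namuda)).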

def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True):
    predicted_data = new_model.predict(data)

    temp = np.abs(predicted_data - label)
    temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predicted_data.shape))
    temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predicted_data.shape)
    temp3 = temp1 / temp2
    mse = np.sum((temp1 / temp2) ** 2, axis=1)
    print("z:", mse)
    print(mse.shape)

    # mse=np.mean((predicted_data-label)**2,axis=1)
    print("mse", mse)
    if isStandard:
        dims, = mse.shape
        mean = np.mean(mse)
        std = np.sqrt(np.var(mse))
        max = mean + 3 * std
        print("max:", max)
        # min = mean-3*std
        max = np.broadcast_to(max, shape=[dims, ])
        # min = np.broadcast_to(min,shape=[dims,])
        mean = np.broadcast_to(mean, shape=[dims, ])
        if isPlot:
            plt.figure(random.randint(1, 9))
            plt.plot(max)
            plt.plot(mse)
            plt.plot(mean)
            # plt.plot(min)
            plt.show()
    else:
        if isPlot:
            plt.figure(random.randint(1, 9))
            plt.plot(mse)
            # plt.plot(min)
            plt.show()
        return mse

    return mse, mean, max
    # pass
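# Note on get_MSE above: despite the name, this is a standardized squared error rather than
# a plain MSE: each residual |prediction - label| is z-scored per feature, squared, and
# summed over features. With isStandard=True the alarm threshold is the usual 3-sigma rule,
#     max = mean(mse) + 3 * std(mse),
# estimated on healthy data and broadcast to the series length for plotting and comparison.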

def condition_monitoring_model():
    input = tf.keras.Input(shape=[time_stamp, feature_num])
    conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
    GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
    d1 = tf.keras.layers.Dense(300)(GRU1)
    output = tf.keras.layers.Dense(10)(d1)

    model = tf.keras.Model(inputs=input, outputs=output)

    return model


# train_data: (300455, 120, 10)
# train_label1: (300455, 10)
# train_label2: (300455,)
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
    (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
                                                                                                     train_label1,
                                                                                                     train_label2,
                                                                                                     test_size=split_size,
                                                                                                     shuffle=True,
                                                                                                     random_state=100)
    if is_split:
        return train_data, train_label1, train_label2, test_data, test_label1, test_label2
    train_data = np.concatenate([train_data, test_data], axis=0)
    train_label1 = np.concatenate([train_label1, test_label1], axis=0)
    train_label2 = np.concatenate([train_label2, test_label2], axis=0)
    # print(train_data.shape)
    # print(train_label1.shape)
    # print(train_label2.shape)
    # print(train_data.shape)

    return train_data, train_label1, train_label2
    pass


def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
                    split_size: float = 0.2, shuffle: bool = True):
    data = np.concatenate([healthy_data, unhealthy_data], axis=0)
    label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
    label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
    (train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
                                                                                                     label1,
                                                                                                     label2,
                                                                                                     test_size=split_size,
                                                                                                     shuffle=shuffle,
                                                                                                     random_state=100)

    # print(train_data.shape)
    # print(train_label1.shape)
    # print(train_label2.shape)
    # print(train_data.shape)

    return train_data, train_label1, train_label2, test_data, test_label1, test_label2

    pass


def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
    history_loss = []
    history_val_loss = []

    val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
                                                         val_label2=test_label2,
                                                         is_first_time=False, step_one_model=step_one_model)

    history_val_loss.append(val_loss)
    print("val_accuracy:", val_accuracy)
    print("val_loss:", val_loss)


def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False):
    # get the total number of model parameters
    # step_two_model.count_params()
    total_result = []
    size, length, dims = test_data.shape
    for epoch in range(0, size - batch_size + 1, batch_size):
        each_test_data = test_data[epoch:epoch + batch_size, :, :]
        _, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
        total_result.append(output4)
    total_result = np.reshape(total_result, [total_result.__len__(), -1])
    total_result = np.reshape(total_result, [-1, ])
    if isPlot:
        plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
        # draw the horizontal failure-threshold line
        plt.axhline(0.5, c='red', label='Failure threshold')
        # arrow pointing at the horizontal line above
        # plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
        #           alpha=0.9, overhang=0.5)
        # plt.text(35000, 0.9, "Truth Fault", fontsize=10, color='black', verticalalignment='top')
        plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
        plt.xlabel("time")
        plt.ylabel("confidence")
        plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
                 horizontalalignment='center',
                 bbox={'facecolor': 'grey',
                       'pad': 10})
        plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
                 horizontalalignment='center',
                 bbox={'facecolor': 'grey',
                       'pad': 10})
        plt.grid()
        # plt.ylim(0, 1)
        # plt.xlim(-50, 1300)
        # plt.legend("", loc='upper left')
        plt.show()
    return total_result


def CNN_GRU_Model():
    input = tf.keras.Input(shape=[time_stamp, feature_num])
    input = tf.cast(input, tf.float32)

    LSTM = tf.keras.layers.Conv1D(10, 3, padding="causal", dilation_rate=2)(input)
    LSTM = tf.keras.layers.Conv1D(20, 3, padding="causal", dilation_rate=4)(LSTM)
    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal", dilation_rate=8)(LSTM)
    LSTM = tf.keras.layers.Conv1D(40, 3, padding="causal", dilation_rate=8)(LSTM)
    LSTM = tf.keras.layers.GRU(units=40, return_sequences=True)(LSTM)
    LSTM = tf.keras.layers.GRU(units=40, return_sequences=True)(LSTM)
    LSTM = tf.keras.layers.GRU(units=40, return_sequences=True)(LSTM)
    LSTM = tf.keras.layers.GRU(units=40, return_sequences=False)(LSTM)

    # bn = tf.keras.layers.BatchNormalization()(LSTM)

    d1 = tf.keras.layers.Dense(20)(LSTM)
    # bn = tf.keras.layers.BatchNormalization()(d1)

    output = tf.keras.layers.Dense(10, name='output')(d1)
    model = tf.keras.Model(inputs=input, outputs=output)
    return model
    pass


# healthy_data is the healthy data, used to fix the threshold; all_data is the full dataset, used to produce the model's results
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
              isSave: bool = False):
    # TODO compute the MSE to fix the threshold
    mse, mean, max = get_MSE(healthy_data, healthy_label, model)

    # false-alarm rate
    total, = mse.shape
    faultNum = 0
    faultList = []
    faultNum = mse[mse[:] > max[0]].__len__()
    # for i in range(total):
    #     if (mse[i] > max[i]):
    #         faultNum += 1
    #         faultList.append(mse[i])

    fault_rate = faultNum / total
    print("false-alarm rate:", fault_rate)

    # missed-detection rate
    missNum = 0
    missList = []
    mse1 = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
    all, = mse1.shape

    flag = True
    for i in range(all):
        if mse1[i] < max[0] and flag:
            missNum += 1
            missList.append(mse1[i])
        elif mse1[i] > max[0]:
            print(i)
            flag = False
    print("all:", all)
    miss_rate = missNum / all
    print("missed-detection rate:", miss_rate)
    pass


if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)
    train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
        total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
        is_Healthy=False)
    #### TODO step-one training
    # single run
    model = CNN_GRU_Model()

    model.compile(optimizer=tf.optimizers.Adam(0.01), loss=tf.losses.mse)
    model.summary()
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=3, mode='min', verbose=1)

    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath=save_name,
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)

    # history = model.fit(train_data_healthy[:30000, :, :], train_label1_healthy[:30000, :], epochs=20,
    #                     batch_size=32, validation_split=0.2, shuffle=False, verbose=1,
    #                     callbacks=[checkpoint, early_stop])
    # model.save(save_name)

    ## TODO testing
    # test_data, test_label = get_training_data(total_data[:healthy_date, :])
    # newModel = tf.keras.models.load_model(save_name)
    # mse, mean, max = get_MSE(test_data, test_label, new_model=newModel)

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    all_data, _, _ = get_training_data_overlapping(
        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)

    newModel = tf.keras.models.load_model(save_name)
    getResult(newModel, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
              healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy)
    # mse, mean, max = get_MSE(train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
    #                          train_label1_healthy[healthy_size - 2 * unhealthy_size:, :], new_model=newModel)
    pass

@ -17,15 +17,12 @@ import matplotlib.pyplot as plt
 from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
 from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
 from condition_monitoring.data_deal import loadData
-from model.Joint_Monitoring.Joint_Monitoring3 import Joint_Monitoring
+from model.Joint_Monitoring.compare.RNet import Joint_Monitoring

 from model.CommonFunction.CommonFunction import *
 from sklearn.model_selection import train_test_split
 from tensorflow.keras.models import load_model, save_model
-
-
-

 '''Hyperparameter settings'''
 time_stamp = 120
 feature_num = 10
@ -38,16 +35,16 @@ K = 18
 namuda = 0.01
 '''Save names'''

-save_name = "../hard_model/weight/{0}_timestamp{1}_feature{2}_weight_epoch8/weight".format(model_name,
+save_name = "./model/weight/{0}_timestamp{1}_feature{2}_weight/weight".format(model_name,
+                                                                              time_stamp,
+                                                                              feature_num,
+                                                                              batch_size,
+                                                                              EPOCH)
+save_step_two_name = "./model/two_weight/{0}_timestamp{1}_feature{2}_weight/weight".format(model_name,
                                                                                            time_stamp,
                                                                                            feature_num,
                                                                                            batch_size,
                                                                                            EPOCH)
-save_step_two_name = "../hard_model/two_weight/{0}_timestamp{1}_feature{2}_weight_epoch14/weight".format(model_name,
-                                                                                                         time_stamp,
-                                                                                                         feature_num,
-                                                                                                         batch_size,
-                                                                                                         EPOCH)

 # save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
 #                                                                    time_stamp,
@ -536,8 +533,10 @@ if __name__ == '__main__':
         is_Healthy=False)
     #### TODO step-one training
     # single run
-    # train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
-    # train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
+    # train_step_one(train_data=train_data_healthy[:256, :, :], train_label1=train_label1_healthy[:256, :],
+    #                train_label2=train_label2_healthy[:256, ])
+
+    train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)

     # load the models trained in step one: one keeps training, the other only produces outputs
     # step_one_model = Joint_Monitoring()
@ -546,34 +545,34 @@ if __name__ == '__main__':
     # step_two_model = Joint_Monitoring()
     # step_two_model.load_weights(save_name)

-    #### TODO step-two training
-    ### healthy_data.shape: (300333,120,10)
-    ### unhealthy_data.shape: (16594,10)
-    healthy_size, _, _ = train_data_healthy.shape
-    unhealthy_size, _, _ = train_data_unhealthy.shape
-    # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
-    #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
-    #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
-    #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
-    #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
-    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
-    #                train_data=train_data,
-    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
-    ### TODO evaluate the test set
-    step_one_model = Joint_Monitoring()
-    step_one_model.load_weights(save_name)
-    step_two_model = Joint_Monitoring()
-    step_two_model.load_weights(save_step_two_name)
-    # test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
-    #      test_label2=np.expand_dims(test_label2, axis=-1))
-    ### TODO show the full results
-    all_data, _, _ = get_training_data_overlapping(
-        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
-    # all_data = np.concatenate([])
-    # single run
-    # showResult(step_two_model, test_data=all_data[:32], isPlot=True)
-    showResult(step_two_model, test_data=all_data, isPlot=True)
+    # #### TODO step-two training
+    # ### healthy_data.shape: (300333,120,10)
+    # ### unhealthy_data.shape: (16594,10)
+    # healthy_size, _, _ = train_data_healthy.shape
+    # unhealthy_size, _, _ = train_data_unhealthy.shape
+    # # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
+    # #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
+    # #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
+    # #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
+    # #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
+    # # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
+    # #                train_data=train_data,
+    # #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
+    #
+    # ### TODO evaluate the test set
+    # step_one_model = Joint_Monitoring()
+    # step_one_model.load_weights(save_name)
+    # step_two_model = Joint_Monitoring()
+    # step_two_model.load_weights(save_step_two_name)
+    # # test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
+    # #      test_label2=np.expand_dims(test_label2, axis=-1))
+    #
+    # ### TODO show the full results
+    # all_data, _, _ = get_training_data_overlapping(
+    #     total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
+    # # all_data = np.concatenate([])
+    # # single run
+    # # showResult(step_two_model, test_data=all_data[:32], isPlot=True)
+    # showResult(step_two_model, test_data=all_data, isPlot=True)

     pass