From 2400af4a1a1c64d2a7f2fcca27854a9e1f9bacc8 Mon Sep 17 00:00:00 2001 From: kevinding1125 <745518019@qq.com> Date: Sun, 18 Jun 2023 14:13:45 +0800 Subject: [PATCH] =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E6=9B=B4=E6=96=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../interview/OPPO/T0411/Question1.java | 41 + .../interview/OPPO/T0411/Question2.java | 47 ++ .../interview/baidu/T0410/Question1.java | 44 ++ .../interview/huawei/T0531/Question3.java | 37 +- .../HI_create/HI_create.py | 146 ++++ .../HI_create/HI_merge.py | 62 ++ .../HI_create/HI_merge_data.npy | Bin 0 -> 10544 bytes .../HI_create/HI_score.png | Bin 0 -> 7960 bytes .../HI_create/HI_select.py | 98 +++ .../HI_create/Select_data.npy | Bin 0 -> 134672 bytes .../HI_create/__init__.py | 0 .../2012轴承数据集预测挑战/HI_create/test.py | 13 + .../2012轴承数据集预测挑战/train/LSTM.py | 175 +++++ .../2012轴承数据集预测挑战/train/LSTM1.py | 215 +++++ .../2012轴承数据集预测挑战/train/LSTM2.py | 204 +++++ .../2012轴承数据集预测挑战/train/LSTM4.py | 481 ++++++++++++ .../2012轴承数据集预测挑战/train/LSTM5.py | 592 ++++++++++++++ .../2012轴承数据集预测挑战/train/__init__.py | 0 .../Model_train_test/RUL/FFTUtils.py | 1 + .../Model_train_test/RUL/ResultShowUtils.py | 116 +++ .../Model_train_test/RUL/test/DCTNet.py | 138 ++++ .../Model_train_test/RUL/test/DCTTest.py | 52 ++ .../Model_train_test/RUL/test/FFTTest.py | 6 +- .../RUL/test/LSTMContinueTest.py | 268 +++++++ .../Model_train_test/RUL/test/LSTMTest.py | 223 ++++++ .../ChannelAttention/DCT_channelAttention.py | 74 ++ .../Dynamic_channelAttention.py | 129 +++ .../Light_channelAttention.py | 0 .../SE_channelAttention.py | 0 .../model/ChannelAttention/__init__.py | 9 + .../model/CommonFunction/CommonFunction.py | 163 ++++ .../model/CommonFunction/__init__.py | 9 + .../model/DepthwiseCon1D/DepthwiseConv1D.py | 247 ++++++ .../model/DepthwiseCon1D/__init__.py | 9 + .../Joint_Monitoring/Joint_Monitoring.py | 402 ++++++++++ .../Joint_Monitoring/Joint_Monitoring2.py | 445 +++++++++++ 
.../Joint_Monitoring/Joint_Monitoring3.py | 453 +++++++++++ .../model/Joint_Monitoring/__init__.py | 9 + .../model/Joint_Monitoring/compare/RNet.py | 447 +++++++++++ .../Joint_Monitoring/compare/__init__.py | 10 + .../Model_train_test/model/LRU/lru.py | 17 +- .../Model_train_test/model/LSTM/LSTM.py | 116 +++ .../model/LSTM/LSTMByDense.py | 110 +++ .../Model_train_test/model/LSTM/__init__.py | 0 .../model/LSTM/before/LSTM_realize_self.py | 300 +++++++ .../model/LSTM/before/LSTM_realize_self1.py | 179 +++++ .../model/LSTM/before/LSTM_realize_self2.py | 162 ++++ .../model/LSTM/before/LSTM_realize_self3.py | 235 ++++++ .../model/LSTM/before/LSTM_realize_self4.py | 393 ++++++++++ .../model/LSTM/before/LSTM_realize_self5.py | 740 ++++++++++++++++++ .../model/LSTM/before/__init__.py | 8 + .../model/LossFunction/FTMSE.py | 25 +- .../model/LossFunction/GIoU_Loss.py | 72 ++ .../model/LossFunction/IoU_Loss.py | 47 ++ .../model/LossFunction/__init__.py | 10 + .../model/LossFunction/smooth_L1_Loss.py | 24 + .../Model_train_test/model/SAE/SAE_realize.py | 159 ++++ .../Model_train_test/model/SAE/__init__.py | 0 .../Model_train_test/model/VAE/VAE_realize.py | 143 ++++ .../Model_train_test/model/VAE/__init__.py | 0 .../Model_train_test/model/VMD/VMD_realize.py | 66 ++ .../Model_train_test/model/VMD/__init__.py | 0 .../Model_train_test/model/VMD/test.py | 45 ++ 63 files changed, 8195 insertions(+), 21 deletions(-) create mode 100644 Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question1.java create mode 100644 Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question2.java create mode 100644 Leecode/src/main/java/com/markilue/leecode/interview/baidu/T0410/Question1.java create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_create.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_merge.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_merge_data.npy 
create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_score.png create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_select.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/Select_data.npy create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/test.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM1.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM2.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM4.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM5.py create mode 100644 TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/ResultShowUtils.py create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/test/DCTNet.py create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/test/DCTTest.py create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMContinueTest.py create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMTest.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/DCT_channelAttention.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/Dynamic_channelAttention.py rename TensorFlow_eaxmple/Model_train_test/model/{Dynamic_channelAttention => ChannelAttention}/Light_channelAttention.py (100%) rename TensorFlow_eaxmple/Model_train_test/model/{Dynamic_channelAttention => ChannelAttention}/SE_channelAttention.py (100%) create mode 100644 TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/CommonFunction/CommonFunction.py 
create mode 100644 TensorFlow_eaxmple/Model_train_test/model/CommonFunction/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/DepthwiseConv1D.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring2.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring3.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/LSTM.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/LSTMByDense.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self1.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self2.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self3.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self4.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self5.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LSTM/before/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LossFunction/GIoU_Loss.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LossFunction/IoU_Loss.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LossFunction/__init__.py 
create mode 100644 TensorFlow_eaxmple/Model_train_test/model/LossFunction/smooth_L1_Loss.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/SAE/SAE_realize.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/SAE/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/VAE/VAE_realize.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/VAE/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/VMD/VMD_realize.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/VMD/__init__.py create mode 100644 TensorFlow_eaxmple/Model_train_test/model/VMD/test.py diff --git a/Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question1.java b/Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question1.java new file mode 100644 index 0000000..57faf38 --- /dev/null +++ b/Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question1.java @@ -0,0 +1,41 @@ +package com.markilue.leecode.interview.OPPO.T0411; + +import java.util.Scanner; + +/** + *@BelongsProject: Leecode + *@BelongsPackage: com.markilue.leecode.interview.OPPO.T0411 + *@Author: markilue + *@CreateTime: 2023-06-14 10:41 + *@Description: TODO + *@Version: 1.0 + */ +public class Question1 { + + public static void main(String[] args) { + + Scanner sc = new Scanner(System.in); + String s = sc.next(); + solve(s); + } + + private static void solve(String s) { + int left = 0; + int right = 0; + int xiaochu = 0; + for (int i = 0; i < s.length(); i++) { + char cur = s.charAt(i); + if (cur == '(') { + left++; + } else if (cur == ')') { + if (left > 0) { + left--; + xiaochu++; + } else { + right++; + } + } + } + System.out.println(s.length() - xiaochu); + } +} diff --git a/Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question2.java b/Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question2.java new file mode 100644 index 0000000..b8be0ad --- /dev/null +++ 
b/Leecode/src/main/java/com/markilue/leecode/interview/OPPO/T0411/Question2.java @@ -0,0 +1,47 @@ +package com.markilue.leecode.interview.OPPO.T0411; + +import java.util.Arrays; +import java.util.Scanner; + +/** + *@BelongsProject: Leecode + *@BelongsPackage: com.markilue.leecode.interview.OPPO.T0411 + *@Author: markilue + *@CreateTime: 2023-06-14 11:02 + *@Description: TODO + *@Version: 1.0 + */ +public class Question2 { + + public static void main(String[] args) { + Scanner sc = new Scanner(System.in); + int n = sc.nextInt(); + solve(n); + } + + private static void solve(int n) { + if (n % 2 == 0) { + if (n == 2) { + System.out.println(2); + return; + } + System.out.println(cal(n / 2) * 2 * 2 % mod); + } else { + System.out.println(cal(n / 2 + 1) * cal(n / 2) % mod); + } + } + + static long[] memo = new long[(int) 1e5]; + static long mod = (long) (1e9 + 7); + + public static long cal(int n) { + if (memo[n] != 0) { + return memo[n]; + } else if (n == 1) { + return 1; + } + memo[n] = n * cal(n - 1) % mod; + return memo[n]; + } + +} diff --git a/Leecode/src/main/java/com/markilue/leecode/interview/baidu/T0410/Question1.java b/Leecode/src/main/java/com/markilue/leecode/interview/baidu/T0410/Question1.java new file mode 100644 index 0000000..7756bf2 --- /dev/null +++ b/Leecode/src/main/java/com/markilue/leecode/interview/baidu/T0410/Question1.java @@ -0,0 +1,44 @@ +package com.markilue.leecode.interview.baidu.T0410; + +import java.util.Arrays; +import java.util.Scanner; + +/** + *@BelongsProject: Leecode + *@BelongsPackage: com.markilue.leecode.interview.baidu.T0410 + *@Author: markilue + *@CreateTime: 2023-06-14 11:32 + *@Description: TODO + *@Version: 1.0 + */ +public class Question1 { + + public static void main(String[] args) { + Scanner sc = new Scanner(System.in); + int n = sc.nextInt(); + int k = sc.nextInt(); + int[] nums = new int[n]; + for (int i = 0; i < n; i++) { + nums[i] = sc.nextInt(); + } + solve(nums, k); + } + + //猜测:前k-1个单独分最小的前k-1个数;后面全在一起 + 
private static void solve(int[] nums, int k) { + Arrays.sort(nums); + + double result = 0; + //前k-1个单独是一个 + for (int i = 0; i < k - 1; i++) { + result += nums[i]; + } + //后面全在一起 + double temp = 0; + for (int i = k - 1; i < nums.length; i++) { + temp += nums[i]; + } + double avg = temp / (nums.length - k + 1); + System.out.println((result+avg)); + } +} diff --git a/Leecode/src/main/java/com/markilue/leecode/interview/huawei/T0531/Question3.java b/Leecode/src/main/java/com/markilue/leecode/interview/huawei/T0531/Question3.java index 4cc36df..11cc9bf 100644 --- a/Leecode/src/main/java/com/markilue/leecode/interview/huawei/T0531/Question3.java +++ b/Leecode/src/main/java/com/markilue/leecode/interview/huawei/T0531/Question3.java @@ -39,7 +39,7 @@ public class Question3 { {-2, -3, 4}, }; // calculateMaxRectangleSum(2, 3, income); - calculate(2, 3, income); + calculate1(2, 3, income); } private static void calculateMaxRectangleSum(int m, int n, int[][] matrix) { @@ -106,4 +106,39 @@ public class Question3 { } + //二刷尝试:由于需要计算那一块的面积,但是不知道那一块的具体大小,所以考虑使用前缀和进行计算 + private static void calculate1(int m, int n, int[][] matrix) { + int[][] prefix = new int[m + 1][n + 1]; + + //构造前缀和数组 + for (int i = 1; i < m + 1; i++) { + for (int j = 1; j < n + 1; j++) { + prefix[i][j] = prefix[i - 1][j] + prefix[i][j - 1] - prefix[i - 1][j - 1] + matrix[i - 1][j - 1]; + } + } + + //挨个遍历寻找面积最大值 + int result = Integer.MIN_VALUE; + int edge = 0; + + for (int i = 1; i < m + 1; i++) { + for (int j = 1; j < n + 1; j++) {//左上角 + for (int k = i; k < m + 1; k++) { + for (int l = j; l < n + 1; l++) {//右下角 + int cur = prefix[k][l] - prefix[i - 1][l] - prefix[k][j - 1] + prefix[i - 1][j - 1]; + if (cur > result) { + result = cur; + edge = (k - i + 1) * (l - j + 1); + } + } + } + } + } + + System.out.println(edge + " " + result); + + + } + + } diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_create.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_create.py new 
file mode 100644 index 0000000..0e9f033 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_create.py @@ -0,0 +1,146 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +# 数据导入 +data = np.load("../data/HI_DATA/HI_data.npy") +print(data.shape) # (2803, 2560) +(samples, dims) = data.shape + +# # 24个指标建立 +fs = 25.6 * 1000 +T1 = np.mean(data, axis=1) +print(T1.shape) +# +T2 = np.sqrt(np.var(data, axis=1)) + +T3 = np.mean(np.sqrt(np.abs(data)), axis=1) ** 2 + +T4 = np.sqrt(np.mean(data ** 2, axis=1)) + +T5 = np.max(np.abs(data), axis=1) + +T6 = np.mean((data - np.broadcast_to(np.expand_dims(T1, axis=1), (samples, dims))) ** 3, axis=1) / (T4 ** 3) + +T7 = np.mean((data - np.broadcast_to(np.expand_dims(T1, axis=1), (samples, dims))) ** 4, axis=1) / (T4 ** 4) + +T8 = T5 / T4 + +T9 = T5 / T3 + +T10 = T4 / np.mean(np.abs(data), axis=1) + +T11 = T5 / np.mean(np.abs(data), axis=1) + +# 频域 +sk = np.abs(np.fft.rfft(data, axis=1) * 2 / dims) +sk = sk[:, 0:-1] # (2803,1280) +(samples, k) = sk.shape # (2803,1280) +print("data:", data) +print("sk:", sk) +fk = np.empty(shape=[samples, k]) + +for sample in range(samples): + for i in range(k): + fk[sample][i] = (fs / dims) * (i + 1) +# print(fk) +# print(fk.shape) +# plt.plot(sk[1,:]) +# plt.xlim((0,k)) +# plt.ylim((0,1.5)) +# plt.show() +# print(sk.shape) + +F1 = np.mean(sk, axis=1) + +F2 = np.var(sk, axis=1) * k / (k - 1) + +F3 = np.mean((sk - np.broadcast_to(np.expand_dims(F1, axis=1), (samples, k))) ** 3, axis=1) / (np.sqrt(F2) ** 3) + +F4 = np.mean((sk - np.broadcast_to(np.expand_dims(F1, axis=1), (samples, k))) ** 4, axis=1) / (F2 ** 2) + +F5 = np.sum(np.multiply(fk, sk), axis=1) / np.sum(sk, axis=1) + +F6 = np.sqrt(np.mean(np.multiply((fk - np.broadcast_to(np.expand_dims(F5, axis=1), (samples, k))) ** 2, sk), axis=1)) + +F7 = np.sqrt(np.sum(np.multiply(fk ** 2, sk), axis=1) / np.sum(sk, axis=1)) + +F8 = np.sqrt(np.sum(np.multiply(fk ** 4, sk), 
axis=1) / np.sum(fk ** 2 * sk, axis=1)) + +F9 = np.sum(np.multiply(fk ** 2, sk), axis=1) / np.sqrt(np.sum(sk, axis=1) * np.sum(np.multiply(fk ** 4, sk), axis=1)) + +F10 = F6 / F5 + +F11 = np.mean(np.multiply((fk - np.broadcast_to(np.expand_dims(F5, axis=1), (samples, k))) ** 3, sk), axis=1) / ( + F6 ** 3) + +F12 = np.mean(np.multiply((fk - np.broadcast_to(np.expand_dims(F5, axis=1), (samples, k))) ** 4, sk), axis=1) / ( + F6 ** 4) + +F13 = np.mean(np.sqrt(np.abs(fk - np.broadcast_to(np.expand_dims(F5, axis=1), (samples, k)))) * sk, axis=1) / np.sqrt( + F6) + +# 归一化处理 +# T1 = (T1 - np.min(T1)) / (np.max(T1) - np.min(T1)) +# T2 = (T2 - np.min(T2)) / (np.max(T2) - np.min(T2)) +# T3 = (T3 - np.min(T3)) / (np.max(T3) - np.min(T3)) +# T4 = (T4 - np.min(T4)) / (np.max(T4) - np.min(T4)) +# T5 = (T5 - np.min(T5)) / (np.max(T5) - np.min(T5)) +# T6 = (T6 - np.min(T6)) / (np.max(T6) - np.min(T6)) +# T7 = (T7 - np.min(T7)) / (np.max(T7) - np.min(T7)) +# T8 = (T8 - np.min(T8)) / (np.max(T8) - np.min(T8)) +# T9 = (T9 - np.min(T9)) / (np.max(T9) - np.min(T9)) +# T10 = (T10 - np.min(T10)) / (np.max(T10) - np.min(T10)) +# T11 = (T11 - np.min(T11)) / (np.max(T11) - np.min(T11)) +# F1 = (F1 - np.min(F1)) / (np.max(F1) - np.min(F1)) +# F2 = (F2 - np.min(F2)) / (np.max(F2) - np.min(F2)) +# F3 = (F3 - np.min(F3)) / (np.max(F3) - np.min(F3)) +# F4 = (F4 - np.min(F4)) / (np.max(F4) - np.min(F4)) +# F5 = (F5 - np.min(F5)) / (np.max(F5) - np.min(F5)) +# F6 = (F6 - np.min(F6)) / (np.max(F6) - np.min(F6)) +# F7 = (F7 - np.min(F7)) / (np.max(F7) - np.min(F7)) +# F8 = (F8 - np.min(F8)) / (np.max(F8) - np.min(F8)) +# F9 = (F9 - np.min(F9)) / (np.max(F9) - np.min(F9)) +# F10 = (F10 - np.min(F10)) / (np.max(F10) - np.min(F10)) +# F11 = (F11 - np.min(F11)) / (np.max(F11) - np.min(F11)) +# F12 = (F12 - np.min(F12)) / (np.max(F12) - np.min(F12)) +# F13 = (F13 - np.min(F13)) / (np.max(F13) - np.min(F13)) +print(F5) +plt.plot(F5) +plt.show() + +# +# if __name__ == '__main__': +# +# T1 = 
np.expand_dims(T1, axis=1) +# T2 = np.expand_dims(T2, axis=1) +# T3 = np.expand_dims(T3, axis=1) +# T4 = np.expand_dims(T4, axis=1) +# T5 = np.expand_dims(T5, axis=1) +# T6 = np.expand_dims(T6, axis=1) +# T7 = np.expand_dims(T7, axis=1) +# T8 = np.expand_dims(T8, axis=1) +# T9 = np.expand_dims(T9, axis=1) +# T10 = np.expand_dims(T10, axis=1) +# T11 = np.expand_dims(T11, axis=1) +# F1 = np.expand_dims(F1, axis=1) +# F2 = np.expand_dims(F2, axis=1) +# F3 = np.expand_dims(F3, axis=1) +# F4 = np.expand_dims(F4, axis=1) +# F5 = np.expand_dims(F5, axis=1) +# F6 = np.expand_dims(F6, axis=1) +# F7 = np.expand_dims(F7, axis=1) +# F8 = np.expand_dims(F8, axis=1) +# F9 = np.expand_dims(F9, axis=1) +# F10 = np.expand_dims(F10, axis=1) +# F11 = np.expand_dims(F11, axis=1) +# F12 = np.expand_dims(F12, axis=1) +# F13 = np.expand_dims(F13, axis=1) +# feature_data=tf.concat([T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11,F12,F13],axis=1) +# np.save('feature_data.npy',feature_data) +# print(feature_data.shape) + + +# print(HI_data.shape) +# np.save("../data/HI_DATA/HIed_data.npy",HI_data) diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_merge.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_merge.py new file mode 100644 index 0000000..d46b5c3 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_merge.py @@ -0,0 +1,62 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +# 数据读取 +# train_data = np.load("Select_data.npy") +# print(train_data.shape) +feature_data = np.load("../data/HI_DATA/feature_data.npy") +print(feature_data.shape) # (2803,24) +indexs=[1,2,3,11,20,23] +z = 0 +for index in indexs: + if z == 0: + Selected_data = feature_data[:, index] + else: + Selected_data = np.vstack([Selected_data, feature_data[:, index]]) + z += 1 +Selected_data = np.transpose(Selected_data, [1, 0]) # (2803,9) +train_data=Selected_data[1500:-1,:] 
+print(train_data.shape) + + +# 建立模型 +class model(): + + def __init__(self, input_shape=9): + self.input = input + pass + + def getModel(self, model_Type='ae'): + if model_Type == 'ae': + model = self.AE_model() + return model + else: + raise ValueError("模型尚未实现") + + def AE_model(self, input_shape=6): + input = tf.keras.Input(shape=input_shape) + d1 = tf.keras.layers.Dense(4)(input) + # d2 = tf.keras.layers.Dense(2, activation='relu')(d1) + d3 = tf.keras.layers.Dense(2, name='mid', activation='relu')(d1) + # d4 = tf.keras.layers.Dense(2, activation='relu')(d3) + d5 = tf.keras.layers.Dense(4)(d3) + d6 = tf.keras.layers.Dense(input_shape)(d5) + model = tf.keras.Model(inputs=input, outputs=d6) + return model + + +# HI指标训练和合成 +if __name__ == '__main__': + model = model(input_shape=6).getModel(model_Type='ae') + model.compile(optimizer=tf.optimizers.Adam(0.001), loss=tf.losses.mse, metrics=['acc']) + model.summary() + + history = model.fit(train_data, train_data, epochs=300, batch_size=100) + HI_merge_data = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer('mid').output).predict(train_data) + print(HI_merge_data) + acc = np.array(history.history.get('acc')) + # if acc[299] > 0.9: + np.save("HI_merge_data.npy", HI_merge_data) + model.save("AE_model.h5") diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_merge_data.npy b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_merge_data.npy new file mode 100644 index 0000000000000000000000000000000000000000..74088b7c4844f4d8d6ac3cec753dcb0c8557b26d GIT binary patch literal 10544 zcmbW6`CHHV*T<<4+4m?CD#aj0Qnn ztPz=zY|%pZe9xo5;O>{_bB)7HgPx5 z9;~75?&G(@&uN*XkDsgC|MZicy#3w&)B7)RTJH9rtTTMXF#W+A`hx~*tkd}a{k3WK z-(R(T-*Nv#gX_Ho^BVgInx+gE44>ama8l?X!JGsYLEm9If=jk_6}&vLn_zekUBSXW z0|h6}A0s&VHF4~nr-;1bk(S`3ZNxEHOumB4>Y`WE=^}U}Z-8J>zy!hDCr1lzA49#b zN|CQF87g|)8$AWbR817rU9Tpr63S*nP9${b>o;SLIZK!+j4*JL`gx}IK{KE5zyYRcG4<@vXyUIP|8HT`J0_leDeW^${KM}Or?U@tS*A(eA$zFv 
z;%eq)TQy1Sr>wOW?7r7l(0I{2!N6fF1j}QV3*KL}Kyc&=?7hzt&)t%_=#-;3I!Ar$ zH^YO&b?PvNu^!-ny!Lihk)) z_WN`zzC+bU(?!3vEByYv#r?e9#~j9n(05=Gb-E0pAJsqMxkotnmGzeSo!HJj7Pq86 zr6=%PZU*MeBA#+FcdclCWK_pfC|zdFiR67PH@^*3Y?|KfFcjC#smyl6we9)=!bua(6d4w_QV}Iz*qc2vR4WA3IAR2ST=b!y`bLK^-Dy5^&0nHlWQ`HQ@N&u@aOyTbRO&F&(XPjM0S+T$j8E+<&$Wi>#ThQNj?e$_e_yh4T+kiDCPNE-g%e`bS=Du6B<*uV@oAQl--)nOn#a?F+ z^Rphod(b87dmXe?-cN34pY~5{n)fyKVUH5B@0X^%KSiEtg~S{00^Z_@mEB^uX^TU@eAz8J&ydjPGrwn*f-1#61n}LHG-`_uz&6A zh#z&rTl5La`ve2@;P>@5_;M^_uI_E&qh%0#q3gXt?5)rH2rk*l_dzL+dQ9Da6}_H6 zc3QgwMYcW9`v4c_7Uu`wLD76~=7h5^8cy_+zZ-t9M1_ifjX86;s=#;c&#rvO-fbi9 z*h==yy9@W^P{p3kb79Zc4unsGAol96GkwOFnn`@`$@o9cpf8(N^wBYk`Wrg2|9>1~ z53-u}!*dsWR{pU};4*QmE$R5-yZWH^`9rRhzzv=sMX0_-wu7nBB z&1U{J#(pAi`2tU8=TrY!A9$S9jXH0K(@$C;dmC-RoSc2hyKD^g&$izw@d7vCmoPI( z{BoB{L_x1zW}b&2sGf{}wLtdZq1x4eO}SIF!2QnNnAkE^)jTbGKiY(a*E1 zapIS0wN6lBDD`<-f(L4-e`C8SdGCB1K93r3higyL*TdO-pK2mjiru~_?)JwQ_T6IZ z7SZQCg11@q%&Xa1_Oj*?by_@!5APo6*M6bTxGemfzwvvK8-RU5eT>At{gb>!liADF zmDFEZLY*J}CGKcj@a`-6a-7Ltw%eK_anj9*J0^a!$hmgJ8J-v~@`y0}PN%@lrXy)r=vqxk- zQ}V~Oq3@k#@Skn6Ti!Q}i56_BP9M`fsoP(XeI7lYIC;M8L4zB-RWG5><3;4}>3+Qun75UR{VrSX{|GRGR``MU#crbLY_-#5%--jK^H=xP>>L=~^!wI=p*N_sIV`o|}kvqw~{ z$N|%sOW&PWMNZH=ELi&#UQJ#d5?L#sd|I8TD?Wohdbk{v_rxXmnI)%*{ID3l0?d+# z_X#`y`QX@-{QfisG5@ZY_)Z=?4o~NnXHcK^I_4CU2QTMekvFA|K3rep*QF10 z>-Q0P_6hc(WFT``XG|WwQN$f##k^d;Vi&dwz2Y_W1-0-HyZ4Cnt?{$G>&L+J zlfm3ufC~P1=h0tE2zwOzkh`)}q~8<^>NJg|uMjovNADi;vP>}SS`=;M36?^8rDUteJHn6{=ikN%zEuft?aqTuxcZCPvg@}CWzy2$CeQw@G$$uc4 zdL2XHGd7j)j!tvt@?i}1>D^}zC6)9Y|NWNuUpSa4Si6$p9N7Kj3mnX#T zoDDo|D8jDK8|HR-KHn*ikaV&0aQ#EjWixZMn{rd+jqjM3_G<3C{sp{uXih&vJcyrM zLcS#(?~7mAK=#Mcox0kEFqf^@;Za@jlGs%|rSH@{Hekb8KvD-J1=l04Mj4>jg!=KcDM+N&{O~3O4E;08J zC#lcD1m3jY!tW*r>Y3V*CGq_4W2bINKAR`-kx+%bk0ZbL}R6 zJ=ZYTPj{)u(T%yBT60zGXRKt;PgLcJ93M)als5Dq7Pm|EF@ES{T2rS{1^%B$kw5Jw zaebD#i@cNUx;(}3HU;V zzHMComiJD1{|F{dZ8rXY|KIW8+HH}&Ea}HBi2aQ4VSeih$a{D$_4hKpBlaDx?GS7X zg#Usd?kMH^zoO3@%HEAm=DRR6nfci4eIxHa`ut9$1><+ZpExrLn)G2!cap+9S9xPM 
zZU%Q%>dZXe4x-M>mG2~Or(M|X{SLqOX`e*@arO?!KU|Gis!PYI{bEO^gtt^4} z)$ec1d%I|UfAR~Tirl!8J#>iV9>R`shfag3-zNh8UUaV#`)8pIg6Y}prB`DMspr$A zwt}j&sQ>Uc`o7r?-alH_iC;xL{ETtpJsT-h->CXHqe7Tc_uU-mc#h!P?E# zWf`g@`PQsuZl^t;$$Rur<}!TjX_4RbBHr-FxgwW2vbXw!;3e`JcN}tx`u)@No8MaUI7M-HwrAK28*}p4I}m@x9{MqfB<_vX=(iOV zOTHEs+-I>DeCYkf-lnFKuWTaw-238Nv3FX^KBk0Ki(I8oo%X6HpYK_}!txoLn`SOY%78slJr&&;nQXKPQ{`gGMvQ)}7#CdjPyP*JGdJ4ZllW zHjB8w*CFtFB|}f@dDh6?PC0i&?ABVc-yQ9!cl~+#QoPE3omve>Y~|kpWw+jmpN1Lx zqWX$CuRDR??0@LXtZy61_hbU|@F=3b&9(4x=_Y+Ynoue6E*$0e!QX{_^gP4-cbxhx z?>@Wf)8RaIoH&es)(pM_n#ho-Fj%E3^1=z*J`rZAGcHR`ZIvN+*!;Xol9oV9xkU2 z$9~8OyP0cx1a-N@kUwSk6C;NweC?*k6*}Zs|ugD zqS!-&apdvwW`4sR=;y!!_Q%(d?~bW5^EU}p5k9qT(I3$!|Ks7zQ~9N>yeH2h{^9{$ zMOOOE{Eeo9Cy#az{r=*1f~vXNf}UrHe=V$!$T1_qIcj`gE^o6I{XB;rf?JPq4|~4A z&zy(wpxYWO?%GA{UhRa(&{6E;!C3OVYeD@}d$P|BqZGuxjVbq{-xE9I(_1h2J*w?TzwU>rZ*V5_Nxjld z;zUjuBbcbvUvN@$-qrlti#)@M{t^~3udh4s`*R%M@j)HnDN>KQwlvceKfTIPf~$`( z#|-PH-;q!S(f<A^!n?Jr6LqstoM(Hw>5e+8|}@ zCh~pjWx-xeRMQgu%gq{si$@y>s%=yg>=%lBY0of`iw4qlA*!i)1D z_Ak4T`+2`_VZ?jiQdm>mEi8Q2tn^DUV=&H;LRSvBA@EBRq&VSC_#lj zTLg=KUn!`zJ5uoUX5v*}StoM#))>K(1opTvV4KLDN39q1>-?ACr-T!NzBD2|nzbf7cPWd}mP<3{);K3~=g3i;j1x<903x26PA~>m(yyc2N zM2f{!nD67-B2HsSvOreNvq literal 0 HcmV?d00001 diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_score.png b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_score.png new file mode 100644 index 0000000000000000000000000000000000000000..ba0fe5ea8a71c8d8731d2b23f172c5b2c17a95a3 GIT binary patch literal 7960 zcmd^EcTiL5qCbGbs;ewZl@b(JKq(OcL0WXB=%OM5qEaO)Dkb!mKybZEC%P_0X-XCl z5m*F*)S!?=>4;K8fT2t0AJp7+Ehxw@&jq8LNss##vh_3kbSP)gMDZP&8*R zBn15;0cPRNH%Fl8%bRvVM-B>q2kqJ^^bK@35!wVDw}gbDANKqw+;%mfS}gsww)2bc zO3C~PXZ*&J?f#S&(;blC&iLHeo&p;y7Z(@jz*8NS2ZRiT;d|co^`&9Ig+{h!8v3jF z)_l$nAlS+X`)w*A^Ved-7PjaGPrf^7BLX=mH#9UnDJ@lA2{qXR{#O>sJDp|Z00}p4 zy1z=R3UFN!g^r*6iix(<5n5Sa=%oF!*E+YPMEj?_v$!cIW&{nr%w{SlB_-LqxG1r) 
zOrFPhM;02T4jp;ma1zQs9H{DBJ(e0aT}IRP60m3lnoH=gbb^dl{2r_1<~;l?Yc=4h(R*+>JN}V1kM-P#FBqVe&j5@=hi8DA(f?HxO zy8)Y+zX?(&$6pXy58jxIK_!0r^y%o(Y~4Z!eAoJKf^}0~ zPJjCJ>652VgC32h#3ztf8%v8zOOZEj+{mq}GRQxjb#c}oN_V$h%c$dT zCLiQ)lsvL-gdk_czZ9$w-T-QdZh2l>!Hy@mIBOy*4@X;<3x5S4gbEAoiaGnag`gGR zogq>p3Z6_bJg#7*G{B3y2iaPa-=cW+nLQDnqpYB%ne^f zRFug@CnsdMz=`A?_weDvn@*9D8&5jKp#umo2+y{3iTkRj}gOKA@6YT&$Iin}5?^ z>E)?9YIc>U~&B;o}qm_NG3CimuVr3CalP{Kf zodAYQoW(^T_qH$GdFAulJb1tK*M$cL1qV~sR~Lw2H9oaHkd%-BEO5(Ja*oeq697li z&;(N%{E{PH+JNqNj2^|C;Z)&yRbvs~KoBd}-gz=YjSUp!uVm0ZHgd1w*91=m`-aH8 zNJrfCs0ZgsyH4|QzOVw3TvyUdwzHZyO_XD?S?aG>*jy)vw!Q8Uq-{fS78Y$GR$!RY z%1)ZDCN}~hztv~3@}M_=2SuIAJLSl4!cy)yqrR=jFjI1hC<%dCMV!mvk6-5rs(!Y1 z{{eGB>K@xC0ebXy%aN9R@X**~qzn>~av0jB@YOUBreO$KF7_reQYBOo8ed^q!qmQd zq%t$)STl;2MQU66e;-{$kp5b?!`Qmx`~5DfwZB<>aCr-+8Db(i$Z7757N>~Ht}iNB(@Ha2du9Sj=7j5vAEi8es%Iq?^T=naur zJv}}1;NW1rLx-q0MZ_2(0wAB~$x)GQ4|LUlo{es>@%ZV%O0UfP&5$|)C@;M>6!Yvt zxhe_6L{trjIN`=L+~QJGQ^Uqm96gq&2bvIpK<=75qn69VCJPHjtD>f=a)C^CWEmTD z+y|mn8o#Bn*r|#dFRkG*){-J2QRXv53R@oVgz2d4N^9zF9-dpvOxoaaXN6I}rsQ~iC>gw>WWxz;=VRIwD({SU)%(%_lWSV6g=5B{| zVi}SKOkAMJV%ISaS->Nwts8A@Y;^ba8BB;m2fFQ>w~oBIuf_s+v2}7XzJ~Sj@d*kI z700&0Peswi^QGdUmmLe-wWq$8CNbs`4KAKI4#&{L(6*S38SmkgNUv z?R>2_|IJIUZf^sY&I9nJ|5R}m6?Q*PRlZ5XwV`eS(tg7=!QDec#(u{Bqs>oHqEK|% zISGwu-K%Y}C+@3mzc4fP`SZ@_`@_1y6pxy+!)@NZd-tt(2HXw^ zs^8}}&2B?eZGMUT3 zqSqEbbKclQ9dr#JO9|tzjhHoS3@b~*7=CAEK0k^@+s*w{SlG-{z;fyct9_U`xZ zr5DCK3CqjN>AIe$&CLNvS$KMmH}c7=8RVTxN=lpuoj6q)&GX*@h9Y2RtJTc7)txC* zyCEns0qm97Scjr>l&20wcQ)v=!?iKqyhbsz10SF3Ku7Y5(*Mkx*x8ad3Hw!Q_{j5z zncssj4>2js)%-V7o|~4MI5GJgIRW|=}zm~-vitn z$~Fl<*pY)$uB@!QMe6JAwG0jA{!Xh~n=InYHHp)*O|c&HAK#=c1(N`z_4V~ds@B%l zBsziRVF0&)++RNyU~MaPjAUUBJD1sdE~CHVx*Z+2(4H14nCdM~U@#arhvFpFEKPKw z#<1GDywW0i;!a(Ux4)e@c8(Vj5pn0)R!j3tvq+;IN=8yv$%=7e%D4k6DsTX_uxJUd|sLymmy51fLngM`^uB_CL;#J{| zlM$x!%KW()npY4M?e7RU>I0gmoZQ|e|g5U`ymL{6j2 zF2DK3FyQ0sKfJywROLG?uW{~KY>I{>wJvKq_a5>Y)Ehl4!2-9KGeJZ`i7g0P3K*QmsNwXzd2u~!vCDZ-^X7e2v&T!E>*rR 
zi_6P9=%+Icbw&q(>N{C%`ZIJ|Be+YYc+*+3fWI=>*C3-TSfMdFQ}zxHly`-8V#5%m z`VeGxdT*}`cR17y?gV7Co#kvyIOahiA(mcVUgmYx5!{{}zFv0J+UM9XT|Q_kWI!yz z%POzFwh}!jURpa_^Q5qF-w+FegNS6bb((gPUBPL}r}hkDQIW={&KzYB_;oxxGGk*W zCMV?=A4a>awm6Q`s{=WWBI3;=;wA@vk6fJ}E5mYUIS6IQoo;^;vNVSs&bi`?`qSeK z`Cy*qO4D-)l60_G|8^GyrP+K%Z}hLw*e^oXm;&>Ef395p;pjNp*QnUn>gK<;g#4#b zpGii^D<}|aYK$1b&r*OT1}X>AabY_2S6kg%>ZT0Kghhgkg|)Rc*cCf!1soQQRZ=?} z4=z$b12B5e3h^cS8Z>Z{{s7!b$alC#|J<|l5+L{|hA(`gkbH)68yd_)ru%iK`NpBM zi38=HG;q;_Q5Oyu0W0qc%%t7cM`dZ@0idMSucnJ6-uc5=vp&QqBP!~vs}Ju$m>5p$ zpw>T#e=*x6V|JZxcEn`i%KOqN={{HN%w05`(aB`|2n&cI*m9tI`+Q8!iL5yAfw;ON zI{f~D?w-0*L2bFN{}^|vmuB!wndMUTTrW@lCD*^LAx2`4(WPS@^=y_gJ%!CWKI>(! z`ijfhziJ%(I?_dp*>q{PNfdKC_!rCUQ-Q0=i zFQl7At_?+Hb@%r5$u2D|0bd`NZWKKEBGM(37Z5Pp+7ymd9*>#^{vGDMnwpvb!r4Q> zc>javZwc)NnJZ=(4-AP;W|JqMO(z2E672qD_t>WQhjdtNIEEQIiC1sPmZ#HqkL=dB`fAGVbRPxZpp`6xx!E%AY_Csiqw4(}Vp z_z8_p<1AimENb;WYUkqkqx$+tMYU%qd0aL@3=y^~YLTdHVP$2NX&kBzJTeOmTZ9+J z&{f^4`l}PBzVe!nk5a{qi1Nx2*!iYi&uys!j(IfI$#)-D?z6c$ryUa&_+?W|>3Vi( znh91r#l3lBNc;tmK&1eEXg2*NWvY0*K|S6BXa^++=ws@Y?32La#>c9I69G4=+vBEa z+SZXx0iXpi#I5-085oEUGU#+gF>!=ukNsiTcEi)ZefxHfES@9F8T2Ym!TuJMtc*eo z4-enU761dxiNUYLWD8iAnGKQttRw?!y8%-XznX^MX=-W;7IqZqu%&r(#q}PQR}0x? 
zdmy#DpwXwUE{qdNq>!%V!NEb$%xm}9r?8up`F47L37FDt37~jvdLya`KRo`&@Ax%8QNVA;4*5I?*99R~;_wzLcV%D%b-* z)lLVqIqgKDz5Jp-oDOs%z}bZ;p9eCMyQcGt0}KJ5OGEKlC`$mGoocOb7RV;4+AcwBF8t4_V}0Zb~BeO z*m&_dKSPv+W$2#UG_vX7m6l(xd3z@&Cnp~rGBz^GEi5cNX*brxWGaIYmH@~z1^cT# zz>^0k5W$joc4os(43?kv5=;u%{h&d4osDr`lWpgGQc{Plh~1pt%zUhIwnp5M@n#4)ZNn~nZu)x=Yq~L73tOs3Ia4yJ64S2HGU}_b|9M2i*i_XF^i#YEe}zQ>YZVX3RSK?CoQ##MG^?4})~(OCjvY5@fqWeb2J z%Z}pKCV=9J1dp@~w>qC&US3W|+k`b`EySufti3PYG0h6^Jg+WoVVj5#5r)oa!-?wa h`qF>TQ7d2B1bu({;2sn>kcOZW7N^Ziesur&zX0mf%!~j4 literal 0 HcmV?d00001 diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_select.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_select.py new file mode 100644 index 0000000..a81c40d --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/HI_select.py @@ -0,0 +1,98 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +# 数据导入 +feature_data = np.load("../data/HI_DATA/feature_data.npy") +print(feature_data.shape) # (2803,24) +feature_data = np.transpose(feature_data, [1, 0]) +print(feature_data.shape) # (24,2803) + + +# data.shape:(24,2803) +class HI_select(): + + def __init__(self, data): + self.data = data + + def getScore(self): + score = (self.getTred() + self.getMon() + self.getScale()) / 2 + print(score.shape) + return score + + def getTred(self): + h = self.data + (features, dims) = h.shape + h_mean = np.mean(h, axis=1) + tk = np.broadcast_to(np.expand_dims(np.arange(dims), axis=0), (features, dims)) # (24,2803) + tk_mean = np.mean(tk, axis=1) + tred = np.abs(np.sum(np.multiply((h - np.broadcast_to(np.expand_dims(h_mean, axis=1), (features, dims))), + (tk - np.broadcast_to(np.expand_dims(tk_mean, axis=1), (features, dims)))), + axis=1)) / np.sqrt( + np.sum((h - np.broadcast_to(np.expand_dims(h_mean, axis=1), (features, dims))) ** 2, axis=1) * np.sum( + (tk - np.broadcast_to(np.expand_dims(tk_mean, axis=1), (features, dims))) ** 2, 
axis=1)) + # print(tred) + + tred = np.expand_dims(tred, axis=1) + # print("tred.shape:", tred.shape) + return tred + + # 单调性 + def getMon(self): + h = self.data + (features, dims) = h.shape + mon = np.empty(shape=[24, 1]) + for feature in range(features): + positive = 0 + negative = 0 + for dim in range(dims): + if dim + 1 >= dims: + break + if h[feature][dim + 1] - h[feature][dim] > 0: + positive += 1 + if h[feature][dim + 1] - h[feature][dim] < 0: + negative += 1 + # print("positive:",positive) + # print("negetive:",negative) + mon[feature] = np.abs((positive - negative) / (dims - 1)) + # print(mon[feature]) + # print(mon) + # print("mon.shape",mon.shape) + return mon + + # 尺度相似性 + def getScale(self): + scale = np.zeros(shape=[24, 1]) + return scale + + +if __name__ == '__main__': + scores = HI_select(feature_data).getScore() + (feature, score) = scores.shape + scores = np.ravel(scores) + print(scores.shape) + + # 归一化处理 + # scores = (scores - np.min(scores)) / (np.max(scores) - np.min(scores)) + # score图 + plt.bar(range(feature),scores,color=['r','g','b','c','m','y']) + plt.show() + + # 获取前9个最大值的索引 + # print(scores) + # indexs = np.argpartition(scores, -12)[-12:] # [ 1 23 16 9 19 20 2 22 18] 自选【1,2,3,11,20,23】 备选【9,16,18】 + # print(indexs) + # # 选出所需的data + # Selected_data = [] + # feature_data = np.transpose(feature_data, [1, 0]) # (2803,24) + # z = 0 + # for index in indexs: + # if z == 0: + # Selected_data = feature_data[:, index] + # else: + # Selected_data = np.vstack([Selected_data, feature_data[:, index]]) + # z += 1 + # Selected_data=np.transpose(Selected_data,[1,0]) #(2803,9) + # # np.save("Select_data.npy",Selected_data) + # print(Selected_data.shape) diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/Select_data.npy b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/Select_data.npy new file mode 100644 index 0000000000000000000000000000000000000000..9aabb93865ab5cfe5ed5e794e439e9b278db3b6d GIT binary patch literal 
134672 zcmbSQhg;77`_0HK&8J=3dx^~R+>gCy8O4XBkP!)yQmIr*+I#Q4RN8wll#x9vvO>uA zdwu_fpX+j6JnQ|s&wb9h$17yxwhf!j$MHGvIcZq$KWSyJu~1P12j*z#Dry`!Zhy+& z@|eYOd+YuGdwr|@>HX~GlQx!i``LdrjpiH9)>WKkq@}BPUh)6;XWEvP+t8e0ii1L% z;QVnr+GNd<@opCurR;!@s~LDfJCG*51IBmE;269S0f#r^qxLrZ9@_y&y=}-JGs9#_ zQ<&TA!u-G;*m-IbtX>_2)=qOo7VN-x*VCA`&l+1dABLQ=4YZ2(L7G{EbwdbKEyQa#Vi|ZWd7TOCTmNm{yU1>E*2Q&*dRQ~8tXTj5F&T9_*FKeS=hc2`a8lZsjAo8aU8bS@=&hq_dn;!bs8X;(n7M?_ELn@iY zoUIGtzk0|OHbAna1}5lgp}0i@Av&|bTcHQH>9b&~V1UZc+Q^nQLdj-bm_Z9hZL_c} zQ5RVswecoK9|~F8$bY7fCvOd~GD;uE_-0|Pr~x#r^bmVi7pt=LF!8P)IIVg}x6wv@ z{cKd6RL0U?U3}NnK|E%o(N7Im+%$2S<)?J1K2*DO;k8l=n*y|P@RT7;-1TsQYXE*X zLj-*>z}f|KA=ajic`R@8)^kw#lI7LY0Jpugk>WNRChKRzeWO135@y55NgrW@+PHMY z5Eg!#uwKr7D|-%H?lK*HG(>;EEPTJFhHj5Jm^yD3n*Zt`p*LmP{0Te2Rq4Gr-)QWLhthBzFn2gf~gz!TMj z(|iNya`Z6&r5tifV58DHc;AXFj zvNam8%``x8u`b3;XJI#o@oAn8vU+roF2FRiMi)*gOhcoyVevu-!!kO^2~%hrE8+j~_rDqM0;G~PYFSPK@P6uPhwUIP`HeOFu#S$+)SX63Yj*u=o^>iVbt&bpf zo&(3UknL`OO-z5SrF!@!YVrCA61K01g^(1Qqc5&l*K zi?*s_Ub7bNZ_wB^yqvUA(rJKy57a^XRIzro8cr8#qeoc- z!7Tq_XEc!Erv*8tv!`{M*!NZ+ORj0)n1%*QOboEMT^S{+`jBJ4(|tt)f6O!x|3MSV zhAJ3smB(6TRjhW^z>ZEute-R!_6rnocd;_^XDT4;s|w!yF~to}Mfe13F#I3~ zpR1A>mnws;ogD1kp@5Ckq;Yb$Jc9c;5EzxguZtX{CMn|UWl3Z#VR>Lzoqfs zLkjD@$)f7F6xMpoMBYMC*q@Mtskba-D^(%apn!oL?Ag;~q5Dn-N?u|p$dd&pPy$C* zDd4`61Pc1aF)l?E&gW!c@kSDrx0GgH!Wo;(UW7)_f8|*RU9t>&W2aVqrYmE{gVbB6xgJ3{Ty}F{6A2HhdRB$^=R9 z-xERG7ZC)>h+$xyDEyVhkeVd`zvvlgY!*lBZbOWlDueKIvN)|OjrD@k@RJip>du+C zbxj(&X0n)*ErT;m?|1xFvFx)L`1+(U@0$e74@)5ZmL#N;Ww7^y7?ui1pv**yonH)l zUQ6QjZBg7?D1r4?L@>TX1l^}Oa24WUo-zmOmpSnElE5khMYOnzp>UrBOqNREM3^Y< zH;O?cO%m$|C9us&2AL6(Fvyj~_z#kpdXC{e$S`~&hBIBF$gO2qeBv`}kQF0=P8OS!l?3`D zg`wulK9?#1D=kULuM&quh%h!Pa-g`J#rI1ZlWjyGnmGe@{9@4hAqs@EQRMxGo38nu`tF1uNeq^DT%K^ z(ufU|h7jX&P?!WRtQSN4dnp*slZL=O3Hquf>9OB?nWtOJYb=1bh8OFhNoR?k%E-{ma3jD-uw!7R8C3;`qgKUqE6wxJ?+sMI6Ws z3!_4wgC{bQC=Qmyy%}QIK2ZYVhT@1TmO|oA31~kMgG7V`Mze%q87_>v2stDk5r@hY zVa(SPL**|?m~Liy{>Q-|ZRSO{#bNVF4F4bmD+M7uW!hZ7Rul)Nh0*X)3@dgptwc%U 
z{fQa4dt4YcLgF~7#(`;#Ad+$zUhjnwWh{nIdcxTJSQw_g;&}f-2o464$oj=_GZ%x$ zTm|qrLeO0!f|e5k_azPTtqCNPc}N#QF; z4E3E-h1LKy6VnweY zelQFZ4~V0CoG^SenfBfaV`jG?1g@;Y#lOPX`#=Yy4<%szO$cSnq!Ime2KbbxV^i}C zaQ29!T2vU?@5NwtL zLkz#n#1Oz@*4xg(w-^xw&XLCR@r;{dERR_dm}ek?h7eKkF>mMBXP!|cj#TD}VNOC2 z-7kVG^3rf>62a&kG03pkkGo6auNeo2Bcx$5P8^FJq>(jV0wRUd_%|elLiYX_XXc?K z2|qq52#_@58#r)qmw}0)0ybFAgx@JK7`aFwqD&Tr8j_g%gM()e<*;*>2;SY1fX_+B z^KVi(!s?@=k_@a}WiZ1~1`l^jVw!>|&R!CO`6Ef(WpR|}NgHKL4oFxttzDeSy zp)~S!lra#%YTHd|7>r5aPNgIUR>+`sjwBxTb5OcQ0f)Tg5Nk3M{R`w!uv7tR-W)7x z62<1#;&`1PjcO4wTN*aA$)R_;98CV3i7Ts=Q8gfkXEst;$n@AXmtpco8rE4#us_WD zkxTOE`lE~`Q8IXPQ4#-GAJzU@7SA*kaQrL>^;0DAh^veuQ!!j-&vm>Y2R8>9n1)Ni zaft{9w~3&hX?O338L-+YjUYW)h<%mE#vl&PnR5{EOBBL9X)Jif;u(-a%w$nSb44+Y z`A-+~aIJh{cuW?=?RGJ^@Qa}ACI|UqOs~vGQ}h_5u$f*^u!2qD!<4980uZqL}WLj|$(qZ~RJcAXX?khms{ zDHc*NfH>ZzNyAN60uBK(I5I^Ps-5CUv1Yz{Kngx_a!6))jVzGBv@{m`Z5ccgWV~lJ z>LRO6v0T=}vR>$8rz8R@#L>|zg>Z59Y$;J>JBwl3Jq|>yWsxQ>h~PBq7j=y#v9MGYPJdOPWGoMvH_X3- zBjUIcy!I%drd}P+M)KINpood>ijZLZ=`d13=L;pgyeE&(3#3u^M-K6J zia1QN(BGyE>0m{4s;MAWTM4DzvaAP?f?2vURzH-7#dk$aTdjb_I}{-@poo+$3b-{_ z6^~Wb@w-GG=cg%SS-ULwMpR(;PZ8(YtP!zE0mlw1;6&?8bhBPxWSKsc`qc1!n><=Y zl;IU2i|Kb}vO2ATjPt74IxrKpT5SFd(t*}VMcDW#<5ih5PP6aNb5O+ZOj*pGpoqF} z%6M={1-fi5neD>n7f(4H9;X0nPgxB8Q$YQD4IJWXL*cP38viMy;(#)C&(uKwCrx-h zRe^=ABHGzZ^`}h%i})4r=7K7=@T+5itrAxBC_r~e4&usMcxSJIqn{Pwyh8SGN;qdP52xqMoBpWd@J=<P*prVuLxZ`1+2TJ48dMy9NM6O6-QL@tUv`9!Za|0&7D%!a;)xZ zz;&4ttOFF__(u^d&&WaNk}4#oE8&fZB1TOVn7&l;XO#w~+*ibtd5l-0>iF8j=7}T~ zJX@lOqC7T-b}NBntBJm)ns_y&f|vek2ys?IK)NE5)~I3rY&FRIRD@#SEVzHsM95!t zY@Vcn`D~`QZDy*fRl=5FHApSchHJbE{F2x_d{Y}=ZZam?a$I-hQm{_WgfkWzO|E`L+ zY{xNk=^QL5&}Or`7Wzvx@uAij@oWd6psS83``I|qXb9d0BkZi7169Ad(2<=7gHy8+ zmZ=RZw#R5)u8ZIov(P(DA77ssvKp<2u5X&K3YrZoXFcdzaN*Is1dqM+vC4NAW?h_v zJ04tcI~hNZ%|aX74<(7~P7QPhMY*wOj++uv@@z9vI981@5 zarHZpwreHo`RAkh%?b=BZiU+5I;7p(1lz5IqnfLcptcqZ_iRD-1ZzANU&A=P5!<4d zz;t9axcipFKzAQ(lT2XWvktqPmf`2X3XK1;9edhLarp8wsE@8g@4Dp>G+vB{6PM!g 
z_N|zjxfZc2*P!O@GHi-nfimm0NWQubTG4CaB)b;<>sP~Oq6u_nY==tkW{9bop!V`c z*d8-MV9Z*mTql&&Y{4?YT~N7Wf;!>N==i=98NW>N!rB(Cw+~=n^merWFvYU9%P{Nc zW(+*rj^_F`u&UdETk-3_Uu_BJz1!f^wFaB)HsitXE%+$44Ng<{AkE5z?L0SOTILpv zpST688@6Jz*A}FTn&7CSDN4k);iiQrw)mUl%&$#oKE4UFYS*KF!A49M+>Ej#Ymm;q z=c2b6GM%PaJ9|66T-=F z0r}Q#*f@75p3K~ez85xlrFj_Eysfwk_N*Ec^cR`n%AMViu-=3EnqA0CJ%G&4|1f>J zC6=XIV*XWg7$qKnL%c0aOl=_j$Px~Pws_-m7<~fAFdA$Ezk7S}XPiB1X6=Kg@&Q;Z zvc~ORD^$l?;zYm!D1F%jnfbf$RcjB5c(yQZIf{gh2cYV40*@>$(0212^mF#$#}#wd zW7;E9doOgNPhj<}Ban?c2vLQDcr9QB!NT3}16 zC(bk+#qb+Tgp2IOhPr)N8)1&P*gg1t$O_Yp?Xhlx9rO(>khurpg7G2 znHI+&wbd0VHSP#bw?q3*H)zRSM0n!`_-Q(!<%kP#-V^qJj^mH$al}783#o}#$Xsv$ zhSSdA$V^*^=v;t`+5s%S<%nq6)Y?0Yz zk57~KW0%B9TwyV+vN;ZyZ^y7L#tBuH`>_4MNw~S4#FL{Zv0vQ**S{RWr^^m-E^|Wj zVn+mgaEHJyC)7FEVP1#>F4#Gu;l+L&+jj=#n)a~qK7dmXtzqB33o=Fa;61WKgtZ;A ziVt9m{ZZKda==sT(}y{FK>B(wtZ{Ha#-Ae)cySo519s2|@Ww28cRXzL!qY)VOx8bw{f9m9c9H}BnxBR5 zh#ee4-7w;Q8h>QZBh1Jhp?@!+#PlSl-8qBm@9t<8aDvXtLkQ|~Lx8srb zmtDf&wA0xB#SjbX=y5EQJ%UqF z{&+LT1$l#B*urq;W*@_(-7c``bwk#EH$cQ29U>R-Q_c;+8TM=s=!T^Q9@u}}9g*fv z$W`)1TheKGFFlRBoBXi5-37-VxFdfR%e~V%RBdv=?1oeDToHoIfMeM3+XD?`o9K+j(b`Uz@ zhZ$$s^UqvB`P4v+WnaMC5+77$dctM5BX0V6!dcr7|NVCvtKVLP>#s{lZSz2;jW2dh zKMF~EZ&u83t?o@znDYT4j9j{gw;L)4X6H;0nc? 
zmzY0zV(Ni#9P9SPx(~j1@x~X@f_@ly5rlYIA6%_+NBGN&xZ>&w>1=@h@I3M+Ib-aN3wWp8u{7;6n}@>TpzV%Nhdq(M`wE0k zdm!N0CA{+YMz>o49>@kD*dZAYOMPK*^)mPhd@=Si8llXyHZ609-&rp>XT;rC>;Z@x>u|f1Fbc!#aQFiCcUz-aiZ*>O=4+&KKwTeXwd; z1XMDj;r+n_S7m&Wo8<@D@&KIP!2EE1G?Zcjky0Fp5P2UQ@brPAnLqx$yaewwZ={?F zN3!`v1ZV~0-KG${WE}ghe+lDe+=y42X{vLxE;~<2XL}B-yFbJfl<5*EV)R|tq`oggM$7Q%#1fg-;CH!a%LupwA zViK~D!wtgk`UD6_hoj9d4sXo8kh#_y4QISzJt+)R4<_RJ+h}Y*7=@Q(k?<8vhPhc9 zQXErYY3c<{w<~Pc4}{o>06eq}#0vE+7;1Xs%V#ev4UdA(6<>7yOu>dPVYuWJ1wriu z?5hsKQwuMs?FoUy-9X&d3x_YmRl7F~!r%PZol`K|gNCBk)(=u&JuqomAk0L9;D0v= zZf`@dTkaBkha)jXIS9$lm)LzwAU^wCLe4H9NUiXLS8N3GR0ALtVyQROwSutxVgR0uPr#RVKCm|O#x2H~#s|T8mK%uKVOOA^9|teKC|tUH z1(&w?useoKHm@gQ(UdS$YlefCMB;2u0*c24VO&uvM3)BQ?q^SYKNN&5ZzB=4E{W}a z!(m*+a$n+$sf!{p-6t5@BC)8c3xm`2AXIEiKpu;m|7ZkuT0}yzFcR8(<1i~Y7&~p# zA-pI6@js%`kQayTH4(TH?2QwTFJMl94?HjWW6u2$JbUj5!2{m#oEVJu!vUzMi$g(9 zC_cA`VI#*MMc0FnXBL6`VG&q(#vk4RehB>%i%^EajA2i#t`5MzwIS?2A{L`BnjeaR>=3NC^ut^$KMddZfNM=8^8KT5V|Fa7FA40P`3hp*`@>~U z3@WmsQ0o^7A=3~T%B7%eUH}Z*{h_EDj2|(bS`q~H-x2s45P%)m(($Sz9CJ7! 
zNE*I`mRYZPW4Ovd97UmOg*gvrBE z;H$U{p`<|kMHH6CB>?yQanU>uqhsOV+Y^9k_lvRkMkuDgFG2NQK-xK_tg)EF$ri4&c)u9!PtEy z3RSPevGZvLUIoU%uq7GMQHgk!6p7z&GodjU2U-!05utFnyh+9Od0ptQ4Mk{dIj*ls zh20ZBY`yFX=ezOjUiLDUeGfxVMI`111tIw6W&C4aHQ6*0UJRG#&tlk)IS`Y+guuu( zgzX9=@O?`qLg&WA%`*%?B|~wCF9Ch>saOW=$d1O(n^%$kISI}K8BpjC$7bJh#1+J1yK@LS#)m_Fdn|tFCL*aI z87usP(Y)v~NG$^13JKWtv>3a0MPiK-)22firklp$SydQjehpz>or4=X4S0Pg6=z>$ zz{DyWJKp4=cx@Jfa+48IkN}k>)o2?kg6YC67}zBvVyqbdtUt#v@js5!-H+LgZd4)>LG}Unm4`L!+@JCVK=iI zD({-{$Rh@wo0*norl277CEjG_!Tez?_FBiHqv$CNmu5r6rXDG?qcBA`2H!f;*nN2y z3iPtjcJDeu@226N@=dI0kHXW@a=0?AG#}?9?b=JMd|d<0H)VKj-UTO>CcIyeg8s#Y za2jgGRgW?(Rj-2J{R~)*bz}Zu6O`DZ;q~lN7_2PAru;_qMK&Q~aUM>@x1wbI9cY}* zz|XWSG#$!E;Yc@P2lAlRP=}B8w^_Y>4LO}x=u>)vpQrEP@9sMEylTOOiBq_z*S6!1 z&NUow>qQ7(Cj!FSQTyR0!rWd!TjM7F-MoWL=?<(m?8KgdZaCz(!_c|`pZ30i6}Jo1 zIYk&Ne2PmBwK$gAg3hZqk$a#Qwxbmg4(P>ry>7Orzl}3a#ZVh)NB-hmm=;#THmCz$ zJE|Z*?lzKM7h|u(9dxg31!v10?D6S=ZQB4somx@-^bsU`>f!#m3a36+L*4BGtd#1& zC))`AZ%x>0+Ji&Q{V<M@Bd*qo}U|4`wl0r3?jVd37+!3g)Qe9_L{%O9IY{2 z-1`b%=LaEZ@(vP9AF?|yKJKrkDO_psQ8dI);@+Il58p*EU}5x_?OeuC?)U}j1N~T? 
z{TRH_0Z1qGVkw%DwfH%P^ln4Cs|95>@4=B7K*aMN_$o9b?CS&c{<#ker(5XTbQ_OX z-atp^F!tVlg31RYIQ@X1d!hXnCdR!+lIc4Hncc>@AJ1@q+!q9x_G8<$cPLL8gop48 z?BIXQdct0uZ0X0@F_shk=ZHG~3HxmBVV2u-tUG)Q_Z@Y(7m^1dp*@ak9y*5YfQb{Ic5euCfqL99=G1f7;=DC~KOJ8wVW4EHmpwD#gTuOI(C8-aTNQ=D)ZLuk}^ zZp(|;DB9GDIa8)_qqetULisIx_qm5V-nVdDs2}&jKVgOKJA9Lw%oREK4^z9RaQ6*- z#nd&waH9JSGS}UNtLOmky%>O6?oUWu|ADc0Uva@zfZJ^`fg5;f6bo8EK|%2oZa$pI zeQ@&&dd!|-;MyQA&m6|9!+&t(?W2MCtl;fiTvEGr$68(`4rWTf00+o&t1j-KD<7PC(Xmyy-0w&qTne6 z@`o^~egOQ6_hCOU3bOu!)?XjMO&QN!r7p_NaT4XO=l(|Df@$1jhW&*@{MmF&*(e)0GcyL@cSJfcX{Vc{C8j?_qX^XXkVJl{Wg9&_ju+zsD}%1oBMvi zbmJEY%^$*yw#O(N`hp`p{M^Snzwvx0KX>o@VYrV@=RQjm=g#mR!L6YwT+g%bkoa=~ z_gUIFuEW+T+zC8>?sxC;+)T@1$ma=g4_(;9UAuTa_m0AL?wVBM8oyr29nmx8zWcD7 zn-zD2`#WhHSG{5lSN56>H{k6GZsP_E?#sK*T(u3R+@>Qo+Q+@%@$-2YZ*a9y5yaSL7~acA*DxRGfWx!E5)xsnn= zTpj;>ZlSC{*LW<4>#(GV+j^sro21jh9W^fHu8ytdUQ(*&KHbvJHD2(Zo0`?eJr&!} z{nyvU{duCBYp3>uD>?LzD?V9*PG9`V<-7Wun`13T_g{c^>1zdfNA`&G)bb>GxGc%bZk@^V z?^fVxcS!RdnThaf-YfE|TW9cubHsS}Ld19q8^w4J4+!xlgb47q7D@B;b_($_FUs?( zJS2F{LsNKxa+16kgMz$n?NMsn^O`Qry+QpS8!7GgZQAs!mlUcyNi(LG!qzm=y$|>3 z%kp}fcD#YISF}*YP!qNFbT z;W9cGQBT{Sm6OM(T3VssMg?bEDByYnMI5f6m0FE-?nW~msi-Dn;bxj)T0;kCUZd3x z)wH^%g%0h$LFQt871{o z>nCa{$n83%vCsb})kbr7HdFPA8d_P|O4^_5si3!#xbNG^=4d-@Hm#;+nI_Vj+d#VC zSd2^BXrQNoV!PYua%~HJscfcPk7l~XcsQ@5f$D-9sJ*|2R)qCa_QI=VG^K$yU9BOV zKW*f^x19Pqt7*MmE#0+X=P78UBSqEp-LjtUYSmL<7voS*E%Bo2=-Ac6ODB6SOs0?Zzj|6rS!z5ib}3` zQ&CBYB_%cFKW^YI#89_XO3>g!baua<)4D@op|jTSGcBZrgqG^MAGPBXr>N41e6 z!`x|E1*zoJQsr1X&1UD?__Tr^J#M1Ya*Y)6wubf0oaS zoer!dGxsKn+I@}uJZk8?V;ymCHIdH3N_uv_jy8|9(iMG%Pgpe-+LTaQW;Gr1WEgld z9^I*-ip!1EH_}9j8>(o19Lr@=J$>8TOtlwE>7imB*+`VpWSeT@I@i$Azy{)KmyxA@ zCGDToOtQC&DX^@8Wb>=&aa;{yZUvc4E2Dv54HPb0L;XGF^z2p@J@c!jfbtq@`%*<) z#+Oi4buH=aswVyumGsT8oJu1bTR;M{a>)F30dWk{>5Fa_Z3#~&g{mCtNlBxL>3L+EpGAMO)965CAyvtzlkcuv znzW^szDZ`%`Sf(EsmP(1)v2Vbkx#~o2~_NpNqjxobYHKO63YrmKQe)$m04T`$)qG0 zPq%8aY2CXdO1CQ_sn{fn?T@9`d*kS)YBoJxpG0k&Gf6ltmpVUWQ_zh#8lGB6@3y5< 
z*!?UL@Jb-t4GHw+KsFs0OQSuDGpS}UgAxaGsb^_E-QHSA5tEY0%_D`>FJzP3+e8YO z#@=r(qqiT6$U!fcA{6sz$h(k){uPqM^$c>VjG@W56UjBCDB+G?1N4&%edcznmxjcIZkCXRNG6Hf zF$}k+)A;LYls*_q&&Op@aA*n@b!5|ai4;o7DWZ#;%gFpg9372Jq9u)~EH?>+h73AB zoI{BsiR6ASiFQ6or>`B!HCxD`-y03)JY}3%1jy-jiK9*5=oQd=sBxjQ#>+hgJvF?H>Hx==R`_96;D&`q9~2U z7t|Y1fhI|GO(d1PT~o;wptFqu02<7xS!WRjbiNq2P8>BUe2 zrKKlPds94>ERCbDO39QsD~afB6ixHYpn<>$+B_OV89B*hC>=p(S0>QV*)%HUPo_r_ zX>@f}GFgWv5PwJ_IW;9x!|EuSBp6LR!xZv=n@V?-QixwFp2D6bQ>#-tbq_M0J0{Q( z7VC$)I5Mt_r4`FkDOo+4{QqTA{yvt!xoM;(5JUFkqG$$xI=Sj3Qt_o^N?#jEWB!qJ z>V7hP8)7-*CDV~NDYT2}vSDKeMK4RFKTI#FkufwfluY|~Wzf``RC->YM(fQoXi{Yc zm1ktoMCJqES7%eXe=QyEO<>-hL_^BiwA4J0v<O13^b|!@~pZFY~L*Bo$$=o)X zL~|1<%sqylt!8<9ok^=MrO@4{$)tE7h5Q8KNXR>$d@d%CdSN{MxDZblq*BTGXf~Y~ zNTa_GbLl+e@Vworq{_)4^^b8hcTE=kJCi`y@5R%(0>;JL@lVZzhWu>dn!GhmrNDJLCdpw;S zMbjwvR3c?ONT$+1$<#b9mE83bsC6WcrhkYcfzA|4Sj%$inm{X%PQ%@awD4;>&E<=x zXEyP)?{Xr2k4&Op9ubtqji*qz6zWS%Azht#I`Te|HaKR`iWLb|7LiUa#)&kYk8x8q zi=NJB-fWaWH<)(jTudYjrspu`kZmE;1}E(`dhV3Z1h` zrrq0;DDYwi?M=)jyWvc-lNqG8)mfAsnnqS(nG|Hkyth7%65SH1a(5!_^JN-j++DIT zo`QT6=mEpkqnhR2AeBn5BvZ0gJh@Hmp%U3R+A%Gkej8;F$2E~E>~ks0CxHxhN7K9P zM0yyXLBH&hNo89aIVdF23AIF8FplLeD~UEYWYe7AX=GlSOB|bgS}B@IfvMRvFq%R; zr>Bujc^(~R`R4nVPO`I7==J+_QZ`E_=lDWeS`tsEBT`7%o#oLgmSkqIb9E(>zE%dc zugIl6^Rj5yNCNprXV9Yw`SjN(hvcoYNq=q@{kJEL4)JG@)cQ1%(}^WHy*!d-T27pk zN=FOwC`dMoR_a7k>5o*Z&R~AuSwyx+(n!}WjRK9+=)~*{nszja485~y%DpUd>?ouo zS?SdOE|2vU1@!w3`7fE;oinLWJ&*Pk7m-X<1_^%1r*XoSRHk~3G(M-1KJ#UZX}Ki* zGLIgxdON|TlqT&2fANxFwq>ECcM zEvhY`1BzE^cv2ySD-^QlqCgG#k=*aPW@~JMM$cs6&bWt94Y-Z2W%BOuY z%&S66>Arsn#mbh`wEAK)xm`jADp_kbc&%&uuKB!zl%91}UVh_oZ~j zyM+EE7n7a~o2B-a(wWF>RI;j;^%(^u?vzhor*(dFQmWyvpmlOp zR8Qr!IH`<6J@ZKTZ2{f&FQjsm&~E0NzAdHX_dS;uT3w?ZGDT#(yM&T%mQl#33K9^g zA_<))@=~fGi?yZHcr24f{+3W`!8P(6tEWcx-TaVRGGEq2zj?(pVp~nwE6YjCqLjXD ztfv=kSE*!SA-Ov?(u#MP)MK4b>#NJi@mB?{xK~Q+J5woiemafYl10Tz>6G47NZYb1 z$RVJNvNTJn?S38&=I4@N=~a5lmrt$NuTpzeCN2GzO)HvnXm(!?l`(yvNlK=!_H42{ zpGMn-@+pDU=sN3cde@Rg(!Z|Kgms0imZVVpi7fJ&oK4Gub1A^6lpb8or{`PJ=)#vm 
z;(jio&ewVLyg8lZFXzz6wQ{OZ&!s~Rr4*u5OeY!&D1(>~QlHIMjoQ>oB4jr4Zp(@W+7V_bGt)?2N%XZ+L2C+E}*x_&E-;qPj~da7PV2U3`_~;;xb+>r)TjDWnAb0+N&{pcT$p zbXuc~R11n|jddwCupUJ@te7lUrIPZARGPRdm&T12Qj%2#@yxQQ;Xx4{87U$Y{vs+l zkxL`1@~C}zB_-@DrFF*&DSdtgZ3-x%j0F`W&3f>+%S)(EpY`$?#k5SdfmSo^RhYBc zFTH_6a?8lox|#IZT=T)Jfx6pnQHpsTy)kd1C$pRAFSm)z&el`*#2Wfk$>w|03W|5F zqcw4LWcs#}yw&RH{exDDTGmAd)$P<6xO zhHCnh-A19gZFKlv4M}dVr;RK&zv=a~j8jE;SxzOd%juf`b=tA(4js!cqx=O8lr_Gd zLXLFP+lALDY(W!c+-#t}YPK^lsi%e~)g&>!g~S(k((wF7dcLckKK8Jkhj=fQIW|*E zV+%E2?I4j?HS|)um11>TX#bl=3S9KRS+0Z>%&KW_LNmE!G*SP$3gX1F-NU<9dd2Wa z^J%9^(Jd5YQbzSxyXc|gT~bV_p;3cdl-6~V(i(foI_?(9%xtH_inSyw(nAY|YH6)Q z8wuxBk$7V#RR~?DuRUFKa6&iRA@tCTiOnR;&VHo5i6-z@k&b%}9b)^T=;sxrCD2Ky z=d#(`qnnc5y6Et@8nU2nYMszTMUia35Z*-B-m$&RXbY9=)={^216`lWFi`9y{WrbT z!)YN|yB3P{?jpsEyEGC|M-655)cmuDR8*TNbyE-dKD|zpC$T+~bTh4eSW7*+wPecX z;*;Z=X^Ln)ji393GDL4u)TIsz_vxbCwe93r*GmN!_4IgSDl*m zFkDSFuUhE*jV^lD(@pO=cgbh0g<8L|TvfEt@RuI?p)^caB7516>n@p#-=(uU9rStq zb!xfXM-PnaY0KO;@|)L1D?c~V7lT%kv2LZ`Z4YUp%XMn*eN1M(?D>q-_PLGp;BXz? 
zS8Jxv`x~hw`!@aR>Y(O|CXzeYO1jQH^!`vUximeXw@2^Lr-&PDX6zwJlNS^^(oGI4 zyD01aQ>wjsmuat`Mx0-f&7wilFnL3X@-OJj{W~MYLd)mW zwf-Gd92%f$>tB+A^IhtfzD;xbUeMLtK02j;j|@)UrRS4h(B73#DSE*}8VY<#f23d0 zfblCDzC51iDF2*dl*VYk?N{1OW3=?_duk9LpeXy#l#~CG&MY4$Ti+jKF!X|ySA3`X z{S$fG;p2JR=S<)|i5{c`hc9%D`#(_r!;jO&qMYPx&IO1iHA<(eXAAYy)qQ#ISWkS6>Q|^_49`5|2Vkr zxEkC4pOKLf5+WoaGlb0E$GGgBt?U^xvbTu#IHw(*`|Q~+3MqT9$Ckaf?9K1}{r%zP zaXR;XU)SgJUe|SBJ#JNgdoQS54qjJ(dN=7^2^nubcyFv-9q-&L;cNbP@2(I2dEc*7 z9$yW-SE?P#wWx>{SATf>`ISS(D&M`8o&S2*+EyNS?tk}g82y#&kAL2_Q|h9gyb6Xp zeD=;?`pdg#=|}IQt!*&w@eglRk1yVu)<3=5owUK$_vP^Npbe^5DUX{Es=~7t$F;jM zrkmb-&%RsVruPLNbK{^`%qCWocwEtV_$xIrzSb#gKs57*Zt+~eXSxM9Vmx`KWo5e zZ37JY(EueUD&TWs4fL8?7nO6qd9PkqAJg~Pp-I!aSd(7~t&UW{V(&_rSyUcHk@lF{ zzyaI8{Pdo+xh5*UFOTBOf4x_XtbkQZ?a56heK8IH{K3u z0~~R3vlHg%9dN6w3#MIefNfWtvBTCD^M2Pv?l%`a`dtIf7S%*XoIQd++9ULP4Seyh ziB%1%po@boHb1O@rpIbw@BjxK_N#@3Ud|}6R702Uj`-zhhvspW;MU9m6}7gAn^zx= z_t%Hl=$e?gp$=+jtHXV1Weo23+j}N4q~`a>ud9GPjT+*OuRVVJtc%7^>%i}BeQY~h z85bP*9%6?BhQ!su*s8T~eqB{;Si~_csRMng1FGg%N3W!+7?x2LlSN}(s#X_8m#U$e zS2fsHtcoqU6%f#{3d*+G;n4NQh}_-)O|z<@`K2Z>AE^edc7{rWj4eKV^uU-QWK-kH$lb0 z4PYJG3@a*C$NFl{xM#4(u$I-(cVjc8w{pNTS9@HzT@%AN&oL+JBY9?hj7x05k7Agd3lg%e6EG{vwP?8^XqwA496akeQIHfw~$108T? zSAAGlJ7GYH3!ap3if1=!V1cnI@<%qt_u(}$sX;A#3A9CmO#@u4+7K`IIACczCp_nT z)>m!<_x26owWkeQr8LD+cRTpJs1F7E(tToGEdE*tzJ=}K{kRiMe?1Wt?u^ALO(0v} z8o#d9!Dme?#Fux(;)^Z_?d*&=N7nRxOPv1O5IdK7z|-o@($*?S>K))5UWdEmRTC3ZiphrmJ)Sl7DZNNOuoKHLButGmIg zunA6eY=y7)-SJ;pTTGhnih=?cbY0ONxgT7xqkVNuzwL?uV-u8>w!!JgtzjA811;(| zf@|M4c%g8?svaH~s%(!*?#_rD-2yX18)CuJE*L)11HE>2!^tC^Pz)Cs2DL$0r?!~5 zdkil6xZzZq8>TFA#E`CTFdp$l!W1tACU?N>=Cv?kZVNnfY>l3Q-SOJc1>x;m;9NZ? 
z)EVsr?={ZIobH0YXBuK(yepPo>xF`wEm8T4E0pog5dF*z&1$)Q|sw-MQb;s6Y?igxmf;9U!IN{$OmR)WL|I`BY z3cKTah!fV$YlQZ(y>Mqw2Mn-pkNjJ&J zITn5HfN{zF5E0;rN8OrYf}JO#J^EtArRI2ftpmDUZUswBcdY!=8Rz@9$GYWCnAD>Y zBJ=sH%s)F|R{a*3c4RQd?P!m@@?Plb)(2zHxZ}X9PH_6+3A?DCkQH{t))u|syQd3m z*Z0P`Y~rs{56pFGj&h+*FtpeMHPhPS@x6A~b+Ipge{GHjV+X)>@<4pxaf^X1+N0f^ zE||Q!En?5~z=Idv;P_}N{zi4dQXaq6B=>^xR8PFNABOGEJL4_dqGh2cY_uMD^SmQg zZgPP++7*nd7`)dVULGFk6y}bjy*znOFI!C^Fp4DN|4 z|BXRV?JjuOU<6)%8G+!jT~NQlRD5eU7{`B(KV;ZUa|42OGRyF)ju4DODUfdYz) zBQVOv8@rUfv86%}?72J|`k7sj{IM-g59kf~NH1JF*bl>Y^uW{5;b_%zGzveogx{Z@ zXd5c9Y58zW+1C$~_H>1>svjOIM`L9|7bu*)kmuJAL7CooHQNJcm6I`~%^+mR2Ew^t zZ$!ED!k^uO@51|_&+Xpm(|H8Se`=4ahlb(Y>fX4z&l9z}0Dt%N#p{K`;F2&H({smS z?18a}@7^6B>wDwp&{1gCbRdf3Ho$G)2;3+fgtm&AnDk)~yq{0NgeRl%@5Cy&$4$no zO9Rj}Zvw_SlcEE%u{#*jg%S~wknF5^*d?l??+-vv)=^}x+aFoj(g1pwgC_iN&u3qxN zt9t!0ug^#{<#@b)0n?Xw!Cf;DQCC>M34?IM(FZwm`k;P|DfrB1Z)@s}pq^tft!aNu zKivmClLw+G!WRus4aIER-q_$c6B|wpM{qHz#+JtjU$77~vU&wAuL`vK&jO?-;)BlXdGQ|jV+B*zY&o02B!U?FTnveYs zQ!%sB6x4pa7~ikVLf?-5uni4>cfkzQn;eLH4`!puq;1HnG#xhii&3uMV#r7MLvk90 z$)Wwx?8_KbTR8=rKhD9~G;d7)F#^;6^F#8!saUkw7na2f@oV@J{JJ+9Zm(vcdG=}y z@m`9c@3XMB@_2OAZGgT19E^HD5`*f@!1(k4IR6MnzlED|-hL)rcaFv8roI?8ZVHsa zb20nVB6K+FgAH#dp~c4maNM~Dwa2f4`S@}qcie_rP7~02s2|o9twhVmOR;77VpRV; z7vHXgpyRIz=$$eZLk3Pqolrm6hb=>Ao4M#FTZR*V{BXr-CmxntjK)3-VN^{)NK_z- zErEC}0?;*j5ngTEg1u$ykrkc>|Ck-H_F9eu?n^Lh-6s5t9*!;UBM=)l7thisBK*h{ z?A0#BgYZop!yJq#UWcO-=V9yXDNt;Q!@P?#P&-P2<@J}KR9cU5{?jp8y&3xhR-;U; zhBba0uJ{MTAz}_vPcFuY?4j8HZ!X#`SqSsBRk%|y6XzGff#sW8tXCTw@rmCWhplPorQpwa}m~l z78aTVaKA$orV($?9}tsC^O3i16+%XALx?;ScV4NH`ep|re8cgp%O;HPv<1)3&c>~q zfoOUl7>DW)!Z^Em2+8-y_#5$`N(zMfG4_5 znEGW0%(MLw*kKYr5BEo})?2XVYyys)_QzwNiKzWB1%uUFv2UY4Ml0swtaTT@6{)e| zdN4MX+ll^tA`m+*96dVhMA_PhHcRPiG@mv#{FmE*k==p z^_!Q%>%mMYvm;O?DH0p%Zboml5_i-wII(&?68G)G*AB_>IF^KZm3LxAy{)i5TY}yH zCBgR62GmYlkBNo5;4;>XvyXz2yF3J)_bXtQZGpV!HhkjPOI&tf)%hUU+C<^NuhqCY zVLGl|Sc6XE*22|!H;Q8t@oBIGbK`J0jWNQudlsG-CgZ=ZyYMq977Lp1!JyO_s2A=+ 
z(Q5^>UoKo5AZmv=}>kAx14u#Ky(J7}hQdiTjTu^|l(1#w9@a zK#$C+Dr}vz75Xc&Sb9l^ypFL5%1S~V?_e0O8S$lF8gx^V@bR7kiF0C6cvOwmiAr3Y z9fEaD^{A9)Lfu1=sI@5wqi#ju+FvVzQ$i5HTD(#i(FH1~O?v$Lt%2r}3iVS%QNmhQ z9C-p?tqItCW-G=L2TNDSqxzg^q!uLNZKeSwHYxbHM2#_j;<3V3iTh4+JUJf^?{_gM zIdBAx?Gv&4+gi-)nuE(PLhz@K91oi(p{aZi{Q7Lg)YMe`2~5KK_F8DB1!LTeL>P*e zW6#$hTpW^$wGTsaUD9Dw-_4j5p~b@b3iR2YhQ6gq=>N_D=lu~VY-+*Y-@D*Cd>1Ny z3C1OFJ;tdckYN*xW96dJv^WX3#BS8=x1HHbgP@G7_;M!#Q`c;Q-n1Qczp3%zLkb2Q zOhY&FKVd>DoXW-Hg_i-b&ysO&UkXMx(Bta~GiA4Jd8@hnwe4X?3D^@zX)i23XwhG2%7X%Bca(rJoF5~;2KF7Sr~%@9n{3E8I>JF z;4nBEZM>7=i7pMU zi0lIqc*>Jx12oClG|q%G?uTGoB1fIp8k~4%LBnM+@aer9g9fFewIu=T$4U6oC=D0S zs?f(b1(*Jd!IP8}EZ#0h>qC1mxy*w0`3lU38hu^m@RR-kIeXCSdOk8o<)B)2HtI~s zLZp<5!29R0L2JP+`y(hRcMR7Gj$mlu4_r*$k89sG2)=AUtqNv*I%&k73wiJvn2Z(g zO|a~?;{6dd>LkP=QmC=cEJ2%V!NASwcHqH|yB-Ao9G*klo+{Ubx%K)>O_#1;2E( zcRqpq-ACZ@)`|zFQ&_+K3huT$i=9dPuqMU==W&nG_LmxFb2_}!Eco>(34`+vV21O4 zj4a8)o6je4eBnzRdh-^VSH}>Na0e5K!ym8jLKSa7(n}rQ&AtQob2m_NsR^f(OMo*b z{5*dKTYn!#`ioNDrv$O>OHrlTHGI3XAFG?E;KfK2w$4(axM&~F|K;Bci;&j#3`QmI zL1DxV^s_O-GxZ+&sy`tnb{}jqUf^2y6WHujh@=IXhz``?t-~SM&$8_6Qc`-a*R83s{_X5^WrecrpDneA_jXy%=0u zw)s~J*|kx2vj4u>$qsus$#yJrmH7^AD?6XlRMx^$OE$})jcj6*TC(&JjD7mA;;BtT1kyT z8&r7uSVH&CN=$AbVPu99O}!+1>#WAqyDH4-s>1Q^DvUL$v44t$zeglI&5}?-FTtz7 z8k>A1{N5>{l1T~K3N^Y^kf2v8@n)F>j}U(Uy9%wGCET`GVRaP=Da%z@`ans)NW;Ek zYRu0|LyvB0Sm&t5+-VvtX{Cnjf*QxRO6XOp#L*N9@~!lhdrCZ0Q9rJw!8l&Rkh@A8 zSgS;mkkGrCgeLDKxN(k6?kh2;wuHXhC0s2{!@Dy|q^(imCV&3+iG+ckD!3M?krSrE zo+@hGrY3&qD`DS%YG@CsP-dgXo^EPnB{6=ZjF(!>W30XHmh)t_2|v|-5RaLdX95VZ52XSNpLT( zM#*{!1;r9{*{qL)3iGO|Fl`%a*;Df^jJ4I$T12 z-88%}P@-FzgwxX`z~5D5r4NzC3sKfIBqFX@<77ZMiM%&kg$x`*&mSbr^z~wwLwbknJ8f`ad7hc&QzfpnQeks|3JvZ`XyzfIB!<1br^01&CG3rpCW;-p?RmNmmI$ zoWFERiC){32x5OiDyz_Pq!MobvEO$j__IbI`zVnzLXCyrl`s!cVRus{R-IF!xkWeG_Gj=|1nve7#!S4gSO;jD`MSdwuCh!#MDTyq zmBlJ-YfFx^Z$ESSGjj5NWj@E7I#Ql^=%#_Ky%zQAtFU&J5}O}OI9`?pcV2t%s}i@P zB@7^^6DLzgW)V9TS=Y}>G@F}-dGnOehHCIsr^JvDCALjbA>=AKlB7ggKNXhLW<80u 
zhw~*AX(WXARiU(w3U%0{ZAv8~hDaDTNWud0!E#nYR&Un#J2fOniI(9KI?SRDk&EZ7 zD)okb^m%_U6-yN2zaEuy#CX2Y_JyWP@ z`@5*oV86uuXli&W`}=|XQ>t-~_g!qK!XdeYj0!5a-;(fBtwp(y)Tqi5#A)JyTJJqW zg2=T&)XuQ?s6sG zvL_YDse>gFYU#;e7vgT360N!^@$-q2YiVM}UWfLll^DO2ewRS4eMaqPJ^n4CPWwqn z=4Y=I)I46h{yjCYn3_qBKUpmy_=*aZM)RJ%B}}WW#v*D&2D9q7?#zaqckNvE{i_Q8 z3#rA#=g_xl_%@I=zCj-$9@pLA_o?-pCrH@1iE|-7ml5-8$eXIHW!uNpJJtf^>|c&6 zh?#yVi(>)tPxer(tm;YG^2X$@@&ar2&ZHHmt?eI5pY@7%}*S2BjJm z(&8nI=%mExJ!;tT{tr&55cpJuujKV&W-GCPKEIFSCbvB4d%HQ#=3|LV@_XnCdOQC; zihQhfOo>_V)u^0L-?&Zk#WO4xZ{1t;oiFOJdg3^T|& z6&mxqiWL&ZYFS@??owTcFIqMFyQz@Py6&PjZ@HpGGPPqVwdoPHc;|ZNks}gr7$lsi zN*_C{MAT7gQ)4Sgilho0o-dfDybzM(V$LSB3*}tuKB>Z4bDA!JnHz)T% z5TmEoNGQ)5e>JEup3kWKh5EuVMVOShM=tA{5HAjDIMGi7HY;(WuY_N`SIa6Y2l)Bq{4G%zt!_3RHSY@vZp3$>VN#qwugjNYG&9&)`Y!v;`r_^Qz4jnC$5bI-#IG! zE_=uSe%&tvEjdQl>-0%--L@sU-h$)tBVVa69jSe{!pW(wDtvcUp(1r*NNwh!HY!A) zlu+Mag&Pi9l<+%>Q_OC=nR%X)E3N6j|0EpplyEFmi62|k&;%-R*vdcrPV4!+*F7cI z9!eOf4I_^$@tAyB?V&;hHPeB4B8Yk7>3-%%_IL|(mFFXRH)~&;`sYQ>sUWC_HB?y0 z@i*T~4QNG8y-EFTO5LxlMl`j;_$&=4d#O>~Sq&dm8vZaRM&6_Lx0JAnIm$9qLOtU5 zWG~i{Jp8Gn7TT#%k-n5zwg&}Yl#uQyF(F-rU%vd_UnLsUpm+Ij%*iSwG*RQVH*33- z8Kl1o;jWywFZF|Cav|3bIY^k#Jn@$v6r9N{q#!ToDf5ZD94|E*HkX+3=-UUV$xd3F zx3!uyNt*9;|Y(DNU&Ca#Ai_%T0Jw510U>)VfTtwo=9 zV!iA2q%H?@Ow0uf2C^QVRrussfD_b+tM_uTzA5WWZB$iN!JMhaEPBwDKPuF#qQQ1@ zaeCVl>}CdN&2e@o&+8r6!DBx&z%?bNG*%&*I-OaA`e5OGSo2w(h_wPGesdmnlc*OB z$!m{&s7+rE+MA7~^nz#InCCvyN580$Ppld_wuZ$j)Vj^@%phh2wT1mm+eobaR$}Z8 z6&C1J=$@&XlpPo`7<|Ef3T{sW=bDI$=+}TQ9!*KHctXuqfj zW&HjMa$|QIK09zN)K!Bjteqy79y(Wr+t*bn-~)3O6-A9SDsgZa*LVZfSXNJk%6HUI z5$Et=KQ2iqoTEnT!F~ zhDR@Y1+((sdn)ukr$iog;{kQhtBwjUKhtOU`E|}GfPOWHSUF29Uv87*Pc%F z`-Ak^;|HmkynkUE9WHV0-R&jE&{=~D^oU4uBD$^y6WeR>FG0d^W~583>H4SCw~H$1 zM@YENYb$mmMmhe%dursaWwx>58isk{8gc$b#&yL5j)B>_Ul@79Ug~*G%V#QhW^s-E zLBbC5U}a4WzbP$hi=5*sVG>{XV%xo#{+%n>~-bp#Oei-V2Gbyr*6aS&(9BW9G}RgrXy7t zu!nq-KUBVhpXBL_F zSdFj5b0vO%(Rwvr)z(tuB|PHZ&IUfqoT$c0YPfAzHO|uCS1+Wtr%L$Xqd{&Tay(mw zfy7Uv;nd&(YFs7 zBN~x2@93ZVoYYuOD#e6Z4@rweQwTW`ZBYF!iUj 
zk`~q6)F{wu;8;n6XZyJ(ape9{h#J9@RT#uNe5t3w`)V2-=QRa++}j`)t*PvdjTRd? zh8o2dTfCF{F#2-k1K z#`i#GhPG<>+p2M?h->%jDtHh-w}=y)-8u{+mwH_!U+D!yJebE@X)qWXxHVxeCdZ6( zxh{F6K{nSF*~6)E+u7H*T6|uo#!uGd>Spc-at=dpa_y>A!f!A8bU=p(e3o6P7Nxn& zwC^-{L%o=uz;!d%lYJB>yk)L&*r`T7`ClvTf4$OTDEr>8lLi6I|L@7;$EVa75UfSu zQLd%9ZXDi)>oIcj>^y#N4EK+&)0fCE1-O1`pn?B*-g}z{%GcC?YKiPG_qMq1NDWk@ zXPhLhh7pUC2Mh-^5RGF`8)590BP<5FRugTlDto!OsT72A3AERcBy30M>A^#%SmI|u1pD&wf)*qAymVr~_9LH@$Te{-J%$FU zA#KpZcP9VamhVEc$h8j4)ggNH-K)XfuiRG{_rH9u!vsHa)rXw>!1u#ZI#i6IhVi#z z0&W*gCBm=0%{HFmAkU?;WhR*@Dq97ono4N_RcTw?IpSMIsx zXkq_SgTGmNJkx1l+^IoessZnQYH`Ms8B6HUNy%RSGay^5L--aw{teZ_haPi+{xRqS zYj8?~$tw+b-;#ABUu?(e@O!ixq5XAuy_b73hYcv_ro*pGS~Q!@xycN;zDJ9FKZtqu zCha}@$?IOdVeQohMA_)j_`Csmea!fAO^e7t<~;J@yDxQ$dYo8W2b(W?^t9@*mh;|K zPltijmQ7EX>siC9Pw8P?pLkjIINFpsvrx}>gL(wC(W4^0CZLuUUfEo`aWAq%h8i{N zYLWlifRj_T*m+$CH`Xy_v<5poh=&^n{3eHwGe7kTqMn5FdDO+bwi*m;!20bo;N~(D zA~uspK?V$(qK1B=4)Z(G)9I}rWUOs{_HP({WCLr%4A`NS1=9xcJzHlphEi+p9@k<& zv-NO#mWh2U&~Xo#+TsBxRp$w4{ zOc5FsU*xj}=`gCl6)PKRkdUJ0SpY41Q1>z_n((fl4jF^A=s7`)_waf-yse|3O>L4%D;6@rXxFUbnQ-gCIwD?OdAN{Vu=c@+zkT?6emm+u2V!>}Mc2u^a z+h!e_P1Is$svfPFJF@8m1A}x}_<+}_)!6o0hv&_-=oGJop5y*eqQhD8=|AedA8`@K zdSBmZM(HgR*6{sIW8%p0jdf!7=v0yKf)8mCpfO|JOC$X5X;I;g3CaaJ+@|Jl;#q~)R_4L|9jq{fbGq(xc02`@c0Q2%1~;x+SAO{n$Jfc8G@Q6)X5 zUDIJa`S{h{fG5OScdHh=`OHY-e%TQIpIR=G`$i2ihzBJ#_WVjcnz9B({8^PwdMpai z!qG<$$22|c78?LBJ<1XdczBrC+-069&P05w5r24!WUa1FEsG?1T^R#&2cB_&~~5>_Hi0q4Kwq16S!93 zyXf}av`Cs}M2!|Y)H9eNu}*zfnQ(kf2EyxG;E=#vv(bQb`dmnV1I`>V;?!sz!WQbV z_^k#`?JTH8JU$51;MA0KwBhq_1(@(G+<>c3b?_cbpGh!aZA$~r_R^z0GeHo~P?#SX z(J7bv0N$Ahrao3oVSjEI5qivoHlOvF(#njmcq7{Eu;5xY&%WH(!!t$$pN)L}aue?D zHQ1A469-+r07d>jdHlpyCiTjsEgrxKR zJ9Fk~>f92g4kt613E78-#BRQ`9zM28 zS!V+-tux`PM>_tl%!cNX4(slkuu!JMmG(JU`kR;_pOTIlaDRpg*9Yj}aFypY%9~&f zHDRBZ89}R!NN`QZ;cj|toUOy16&if%!g+^eif$fPfz}KN>w5CozSfz*RgBk~~WMWR;EO=%bnRoeqvIl)G z+K9;}9o&bfWB)^9{44oaPYbsidK?ck!IwNv_@u-3b5^wGnA^LXQ072)3#n}^Q*O_2A}z>ivdq||^ADIE`7t;oD+ 
z#gvR}G^|H2p|`K;WyJUDM!pBu^Ie$^`{Snwz@9hy%@IM`=n*D@=@ zo|w>}vjuOaSTXfiCPsHM!+6|^WkXF^#hwO+@SVz1Gc;4pSP_wlQGa!aXl%x6>T$qI zD^in<7=Oix_2gpnzufQXphFj)-T1VHd&U=eW{Kx*Ci@uiu%(gfPjc~y4vBn!v9YfS zA)nG=6R1T&eIrf}v7%CMBc`O}!tOqKH`R!7&-lKmr5Uex>QH*YjNsAa)nN-fl?H6t zM=oisXh03A{@jAP9P?IUCdB2V^6HnP&AeGuo}? zI69gT&S&htnTZZlt?(^xM(3~Wb<2FTo?%5KakXJD>k^rbejly4syCp5)rclP&G^Z4 zaqTAOVb7{eeA=bQ>|JJ*ok~YZ2+tk{S$M|Pf?|5unR7-w-D1M`%jsAw(oyLp&+Cv& zr(z6vwj%>iT(e-)jGS*wF89jD!tYona(Z$R1o zZ2T5xEdI|3*8x_<>@?wF$8?PUk&YtPE%Ib0-t;+!oyV!0U9ym|Ed!4@=HHoICv?!G z$?QxVq0bJd=dT~njF)M_u;C`WF1BEIODiUNX5d0u9umCuFpoZnOgl3YZt=X{~sqQ9pf~=&)^AIt@2YQ9726Vtqen zB4|JR&CFk@Go$M1Oq8uLVzb_gMj`YD=BHH)GN4&+g8I4zqq)v;-dG5!Y#(G{22?iY z<47|zR&Ghh({mX-|DngQCglAK3p}Xd*$2&-{hB$98h_h22kV#Y#ls=2Ys5iRHx$CO zKOJ?L1vYr*VW`P~WpfRv8gE5OsK=NqeYQf*fJ&^Z+{-b!l&t&u_6accAL>~TqcYS3~*eTjw)H{ z$ewG&>>ozhUuC@)?1SyMEanh1+Urd4{IVbJvRwF8Ht=`7SgRlt4qiw{WbItcq0mz%EN<6R#cv2f*1KXe)U0AyG2gVwqjFZI$pl8 zBEFFc9Y+$seYggjZb6OR**KVPz>@XU#hffGyh07M%|-H2E7p2vU}dQh=E*sD>XwJ~ z$1Lc%F$Z?5GvPyyh~qg}yl+1?eJFyDocU{)3C~ju=9fHPiI&$ zq+dGfjm?Fsxdm>-c}&Y(I1b6cDq{U_Ng*oEF2c1BW`4F8=eV!n7km&i?alBTm;tSO z7Vg_waeZ4Brj1O87q#UO_Y!_TGvlDmImn{v^^1+Lw8%gtH8J9)84-7QezSZw-sPIn zsW1n(Z)M{GGguTksJ&svOr%AndnVV&apdymh`)Yocnv8!}M;s5A~jK zz1}1r1Bl}lQZZ==-AUtOtTF?r};YQ@;KIy@e=57}PnaA;_NC)e`{<&5~sYaX{XqS8X@ ze6a~lHc-RMGI08NJ}S*AhIvK?%8_T;V+xQ~%fvGs++V0{#$RH(QA!3jP+xrqo1k?w z@m#weyO>!!?&VsDxnYpOf@o%o)scGW+ZZv^dIjopX1<%qz@D^p_)%xx?qPoCeEdBM z@r@bieED4LnR5)eck%z7EnZd5v6G3O1b zWyCyBD?Zur?5V#MLG16N`C6<$O`V|ET>W81jS<;U|J#dV=Pbykn9#S!Q5flw?Jrw! 
z&sPiIhSVRf$-@0|Q9U6W-Ig7|`}x@zx@s?mSIoeX}cCC`kECDhi*R>tUv5A`Vj)ggb8%uv0UjO9;rndX zI6nh3ocNss8JN<|0{^f)c+T68j%_la+K_{G^yGQ;vYe1COvo(8nBV*O4rf2s4$Q)h z`ssMC*Te4!v&&ySw%Dd)^EET3IAvj*avuusnsKut@yA^K>TN!ZO-rzHo|XPtgw!pW zxYIKe?nPFf|2Lv4^IP+XeDt25iC5JAK=x!^(SC$K%f_}XIT&&;9~NrNxmL&ce$0yJ zRr64Ix&W0*GT@zL#)0o<*nBF+zsW^#xOs^0aC1?7sSq317IVM&C_a0aAaKti_%Gd$ z19yt?>u?Tk79W7Q*&z&$%*C~}2hk!v4^aot<7VIqI8Q!}sSPio$(uvyx%)QEtFL3+ zs59)}Rj6;5qQ(7UJeYbCKLf6zbV)JJgdBnX$63TV-$GQvF$5G|!IyIvaH8!A1Up?t z^R^ez=ja8ValU|_gTux9k+EV= zg&2_>5-keOgo|?WNU?Qrw3w)m5tBTk#q}9cV*QOMapq#A7&bLlWLJw76(hn$>uHgq zWO<}WiwP6&=0yn8p9o<;J5o42iV)WhMu?Wl5h5}+T=;d16y~Tfal3kycr+74b_RFG#YsY9YY+;np zWJihzrTok`N_eb_70o%G1>0jpi!rg{rhAO&_9Rz5Iei^UPEF;xx5&Wric_@r^krn8|7l9HA1v_93hq+j}X02g^GabVWRz)a4~Ch zggC}=nJa~gdH!KST`^3w>l-GTeGV73>_f#I=U8#AQncu&i4KG}Go(~sGk46c7 z8}{Kql=#mhN;C_O5`D`>3tjyPQ8ZI70@$-Tb)&_uc9Ejxy$CVwRpp2czw(IVyodtizZ$6iN?E7lm%{du%#d?_4EfZ;a@3SuTR>D#Tbvg>ZTkBc`Q9i}HQsV)^`7;Zmp&Yr{U{eNd=kVFqe6_YBNwN)#tJ#de`Z6R=zlap9Gxf^mp3Gd zMy5pZ_;I}OD2x*iAH|7H>*7Sc5{1a}be8 zhZ4n@6LDhqiv%%Xf0Fp#CPDnyH&LW{CX3X5Ng}C0E}m^p5Oc35i52}4#m>qJLZyrs zccnz(ZHyB(!{UYS?F2E5f7ke)DEvbb#IGfBV)p%b(WgKm{+>({%~RvVnaFtYa!|ae zRWnX(ofI#|^54(TD1={Vf+&+Gildr%VdJ3?)7mA8FE140S7M?#b0|)%U#$@OBi{cwz1wC%(K*5U*y$iNB4Lg>Ol`h#r$DZVpNmrQ4Fku_=k- z>5c?(g8e??o*>-Z;>Drm2}0_UB$nKY7vYoR#gQ#MVc#K9)Ekg2K9X7CuN6?%pZliIKhP9WQoVP7=f0C5ufBS(BZKVogz^Xs|9` z*iTOq%lX;8IjQ2MnBELrSGP7vi&l10kvI5DM7ia35WSq#i1uF{gkvkeJCTPI0W zD4!tWSffe~i6ZWIqUcdMiG58HDPNLAFXu$@WNwm}{5(lm&nAi5t&)Z7?nE(~y%whv z#Lc1vvC5Jt4qQ$Vs&}mUkpxk5UZSY;H%WXfP7t+N$I4fU&#ww0D@hPBFOr0&T%4#8 z5HFTrl8fPA;za1m1Yz$LC!UXp7q+eCqWNdJaA=z-8hIp&@yT&wg>Sr29f=d`R>ldZ zo$=ytPNEn^OdPl!EBwOaMfK%zqJ4Ut_*qRM-UP*qKh&i<$K%ALv2szNO0-BQi58Le z31a*(xi~bN{Fxgk4E+*Bq%}@l>7FWXXjI}>^+b`-ST4R!N*0NM{MqI`BI81W2qpI4 zO->Zgzb1&QGTvjZB+5NX5PnU!nbXLXv?uLE{GG$&#J`l3H!xaw>VLLfn0bE zCGW>8#PhItvFcu|usJ6eZo`s9`2dBuw?{4#sn_3p&^)xZ)A+HYm+EOlD}>- z^og4BLjEOIY?!GK^25>MOINvAb|O}&ofN`l9{oe65OY4q2&X`f$vs>YxY2V?g^T2! 
zVWQsI5Mdh`B5oRE#K^_0U-KxjsDFg;riOl987=P$C+)We_^Ap9~&Ft6OWbvkh zN{sE1Bpwe|h?5UeMa_gvF)=quj3AEw*rkZZTln04D|WcV ziWa$XV%nQ%(SHdwoq9O?ON_|AEfwk91;{1$Qp^S_b9cw7W&4eg1{8X&i_(~z35}&#Bph;QLVuYVUT;g~} zmrE4Z1sXBnRg!S{7%j3#DMZkBxj0o8Ek60ji|Of{8}<3uS$h9?xroV%5tiM2PUl!L z`+KZ-vQ{qU5eH?PVnu9ptXN@<6*Vd=n zgZvs38zYR}6vDQ9oY34>h?uWYVjVx*&^uNXcaw|CrLp4tVd|_)lDOO*@jT1u>6=LbD7_o0PvHqGpN{bb%iFbJgg?L+)&*IoG2eD5ZW5oFG zv10DNC=v8ZE<*Yf-yFNcm^iUeNB+Nv6Y>4y1kNdh^=*uZ`x+ziUqlE8GqW0Tp1q11 zTBZ=&T@)gLbKOp#D?J}8#`ldEb5_TS?)2p9tasF_NKq*$T1==BD`wV-7N;A=irVy) znQx;-Lt-nvPn<9tD{zVDTV=-b%eYvnr*dy|tV#RY4 zKTnJkJyK)Ekil{>N1+hAwCp2uQF=p#xXtW%&NWV0$)*0Rn@2BxRwqV8456o;h!H`X z;=~R^oUpL(qw|@Gm`lnkC5v6;^_S7{qBM>eBbTS1l8Y#fLUbptlG(p8^y&ZO=&IwQ z+`2AZitV+#3$HDTf$J=>TkP({4iM>Xu&_l!rG^<&N>Q=vUhCT32`Zon-+sS8_`xvG z^PGM5*|FBzgJx6>W3+tDXuN}(*^}$+K;9fJerOiP z?o9=|NZ$6`5zfCx2h;Fn2t7h}Fd!p@jX#C)c(ZU03l8JEo;#SN1Sgs7WH+g;W8O(F zOZ^`7U1Gs-C-r}alGSAIQzibS2Gpt_!RI$4*j7oel|2PZU!2e|l(!GKI7YEBr(3x0dFA&k?fg|O64u8|%VcRiH;Rrd1O z*f2Jj8^XF0v-@&`xbJcx_qGjT@AH8StP#Kj!PeJqg)(ev7_B?+WUy&4cRB=dhWjbr zTNJ@2YqIEM9m(>C!K~LSoayZ(_NA`f35sBcNkP0iNqR+W8k?5v;*_9WG+hu%H@zb~ zE5H919nLO`!#G-c`m8tmct+}IOp@4sCy*U3hwwtrP#VmVntN61Wp)ndPL1HwE*X3= zJebaPLbz~X5Cigqnd1<`xsL+bqiY~T2L@9KHubt8b`RgpDgnXVtPP~0z1%xHfEL{Y zctr5?goA;!dL6(K9%27)(^SEF|F)6ZBbc_@)DYIzm)i4p7;Clkw=Jb%MF+uEcx&AXZe9wOa>q75&&od>r&w5RW(pv;XX1J`;SX zy%@w1nL+IIIf(At0yy10kPDiIa_gXAKHMM5GZ6tC^emJ{HbETvD1=kt&z~zp`QoWR zwY`MT>`aEn_(f`6p8+J z+2ooZKX&os)fyo@zbufyg%dqb3g8{#fOX>o=_kI}C>*KN31429J{Z*|h)xohdw+$| z#VUXmg97Qg$(O_Rf@vn4aq89p9>@yfy~{ov_{ops)D7gBvHtw0B$!J| zym`pVkBQ_@bc&39zis36vWu3zU)})!-`-2T=L7C!&ML)Wq8qeq8}a3`>^gd ze_DlkaKbII*~*92&U^B?{QO?!y@`z1ZK*m9~3aIcTvrSC+c-YJn#wZSrAYxf>sM@Q^(B zq2*aOzF6hUov(dZSlyee47~W~yf@eM3}D~&0SvnC%hi?s3|Z*IPVQa|ck<)o_3kW* z_2t}!UcB$JF*EO-#Z3DSdxcV6^Cwv(8$&(FJ7wbPv1Xzalsrv+P4g3`(l3%I_%F4Zvt3yz>hQD`jXwfSo3T!12g3LmO(VT z@6L{={aB^3AD?aT=j0v19Quzhw;l86&JKax)WL^OE(ZvncIT8E{ye$gpGK>KIOK^B zQ)C~bTs@gP!h?@m_^|bG4{E0d(zcE8_GB-{c>8fm2VeH|^5V8N;_pm}lLi4iyvm0Q 
zg}WO+4dggEhubdRban_}ayLI-TI|1`l6wTE^=Ut7q#;}=~LI8I%hXNlQ>x;XB+<6gG(-NWyo=7 z4u0j#x*a{3wtqW)C8y5c^phSfueW=$?h6mrv2^E}pPt+_S#*XiF5Hu_jZXJK`EOF)af7}?~*_HMt z?tD_TonI{6c<_K1$4vBK&_#DXo4J`=kGL}RwI|=m{mqRfuCBRo{tHibNcZG~VP3Sm zBO1qo?R4JY&iGL-yjjnMTL!ywR_S&=o#V##?Ob^4;&yIJ5UpaX2d|IvqV9?ZpI7zZ ze~-OcFx8(1*PJ=Jy(>@Td(vODrLk>%xvz%{H%Gg%D9MW-zq!(6yBp7WxUt(muI$>x zi`_1{F=mt}-#&5Us^*@&t>?lQpIkXU(JwZ$^tr6v$eGdfU`QWV zj`ecm(^+ox3UuY3_HInC>&1X3p3EQX#@Exf@%)c%jGpPrslIO9VB^Y6sTKP>xN`C< zH{QMA!zM)meEm-V!<+iiXoegAGW2HC6gO7$lG@S9n+}iMIOU2P&DTlXc9mFZ>ciqC z?({YB_zl-`0;2vpwjZ?!#I}KHR#@jlZMaS+K&BFGl*%F4U8my#hFNUI43( zm;0W2vviXO^{;txaiu%6hDk16_u`F)o}5?Lhm@R{BmS}wAKsL>%^2&&i4KC5hPd(b zW)Cjv;!gkLKHPo7l}-y?Y4%aHw_l#zVCcn!X})~2!<%&n1aeWQ?X->dFvs0 z&fdIY<-%vi9(;G*g@;NcPy6}tpD(W5peNiyw6%)KzPv28H7LZ34+4WkXA7jxJCORJ zf&4GQlY_4WbDePz=Zbc^K1Q&^79ZYz5zhLJefh^Dklm!#E^p?;#iOL>1^TjZau7Xd zd2x>9eT_!Jta&Sh4@dg5x~%K8;s|@64CdUkA)Ni(k9kd{Htq}J`2hj!-Nl=FR)IVr z^<_zL2dK}C%_XNue_Ne9|*zblPPd^T#sqm=*eMJwl2xW=vDfVc%+#kVb z>x20zRd8&w^z;@UJa#OIGpmKsVDKIulV?v5tbauGqWCExbQ6u_acU4Zi}v{Td@!5; z7s0~^BdE9UAU`z-;VI#;t-gkEbwCK4?G;_)`VLwMe(Pkmn;oMfIH3Mc4qKKax}85~ z58KCB(aEap+sm<;5&W!;psRKVFFHlC$IwuE&kpC7tOy<`5`I}KSX%gO&YK{XT^Fsw zAdt0k!&uZTlILm#arg9{9D6>9Zr#Iq*Hkz~d>AJj4`FnIT%Qxnpr>Kf2oI>J?oY4q zKn@d)`ieQ>=h+s4QaPnmUi+n@*bAAY|1)Fygp5zm{m-a)2SBU>6CM6Sf*(_ANt`)^^3UOLPG7K-wPx?-TH&r~hawogUv$lB z0c>LD$Etk;xmtL5Q^B$yhV9{&gmAh(3Faz4!S|wtot`KflD!X2CWf%8fgkVp3F4f? z!WD%7HMcmzr=kfv){vMo-px&~MYmfbTIaf*-2EbgrrQJy3npnPvDNCa_*v!`&Z2W< zEffy4DU^ARdl(>`fAEK${L*kYTbc)Rh|CQ1CiwGfy#P*&3K#uSG^ZW=SXVfD3*jKM zpUb(73+KR{VXS*Mlr3Hav7{`7VNzRAgwLlA zh~S;QvYy0RQjx@RZ_z3Zf;hG?h;OHbbLt(@^tVOu;rIaFjXcasv8QlS1b58d!C&+c$;YZb!D_aYc4TIucAd$})T4`Ubl3Fh3xYOawy92d?TF`_Me4(F0(p`s2)6vScoyt(m#FY_OI@RI24OUL%Y#n0{n+@K7e_n@;QQ_}s~qLWt&yQ@;?Y{25Ro^#@d2K+HZBHlkl_SExh=okMNB? 
zf@@j^@LNN7ra1fa)XQyj4PVPu(H`tl?#}c;cizhN;PeVlS_!tcd%KnmLxlI-^Psn- z7eg$(sm9Y-MhyCu?5zqStJ9UbGD0%;|1iEI+?W_Tso>Zj7_q zK>K?BtSkFeKztyng?e-^Wl{ecb?uE$k9u7P-`vcZ|F(3^JY}xcJ>hLnPR$x z#zS2>xW{HTs1U4?zFzp*YJSgiVQJ6xe3<3JoHJYad-fVG*x<$m3G4Xh@jC9^xP`gb zUHR19oAqSh)7HAPVVNtFUT@>h8LqUR;mpM^oVfI!JA(#n@IWJPDbwZF>~iW z<;V9&1;a*ep;P;9d?8v59bK7Y9mwRx+jw@S3+Gw;@W1P#t?YN@g+U%%^TmhzKD$yA z@5784zPx*Ai_A#2aDd$B-$3paEVk>QBMWA^^QpTFzqS+{KV4!l+ldVd!q`F1@YVh8 zY*Qw@q{lk;aT1JczKy$DxUl~$PabdM!hPdh*?F)VgS)L{Y@YDDR8Q{z;lLq_JUO=7 z7RlrF{4!|)E$gor4aSnz{9+kUPiJ11;q$DJ$Ks%#67$a~SU|2K8q zdA7`l|1NOn%HMu`E$6q%!IdF1H&QcbHJhJyWSfHJ{BmH4aMQW8Y3)hRsji&X&{_KS zc1C21?`@W{ciZhuHgjW_Z>wpE?FP@TF9=!E_8wad#;j2X(IMit|n~iklHd#NW&079bZxfAg zFJn$)N5-{WCU{TcBz-Yso{x}O!gTpOz(}|?99JOU5^*S%(Mg3*8n>8!GJDjNnbWUDTd*}HHmz0@dPoH2ynhBNu;%P0nI8pWlX6zg^eGeSo2p3PYP zahkxLrjuA!e;h}7kL2b<6F9A41lO#Z#KzGhS?E8KZ4Z%mt1V-dfndK6Hgvf;j)E>@-)kgATOrfZ zWH6lTXw5&*$J2T6Fg9H=ik__|&;dhfQ>JKlS@G_Ok(|B0pUmvQ)lY}=p@kit^oFoV z*-!`%Q~n-KyIQu=pGVSrr4>iG+Hua4;p}K>$01Pka-Gb}5_rM^Xz!1(?}jR?8nYAW9Z*}AWaT|SKEwb`|L^LUpp?CXDf4u5mf8@ zb8}gLMnu`sY6sXSX9k@Mhtscc0S^xu#c5l|@yM8AY>+yZhnv}P;W9gZ%^pvuoKcL- zo4|Da5p=r*p4xB2+6~6=$o6q8NgGQmkA>Vkb1Lg>AH{ZiZF$$QKclKm6is{r6VHre zn8P5pX)&4;wvwZMj^qA+`*HT~+3b394)bEBvQvV@RLBhW+&7f3HjUwqUA@@D)|&0M z3}-I~d#-piko_bEU6Us8b?wot-&S$izvHAf4q(9jQB*A^@O+!`3<^-(7cz{sZHLnI z`%v18FK2ZhO3#H8SXPbfkTG0(xE=<~) zhBs${7u`p5{?f7BV%d*o*RA+5as+=h9w+*a6`RKn;1qvy%FUrn9XMF9a~EDtwc)&P zeQEjKhUZUP(>8xFS5C9#e^-Xn=u8i0f9uWG7JYep;1D)C)Sqqgd$PCh0D9E!&wvgi z=+bB`PiltnXI5Wg??5&^XT^dRy}9DzV76^;Lp%45jB4JSHdP1HCc7WIOjP{(wl^a- z4rTmVJDEW%I{njy3v1YL;l)90Z_|r`0|s*%2Xnd8NIq^fknzs8OkLBP&c=3JJbgST z*RZAL(op(-{F~RG^&moFig{lrGV7mlys^AJPZ|zqn4I$s+2{EW zqqyZGx#M|ne)O^BDc3Gc&KybqpH!YbJLkpC#Gw$uiQSU;xdw_hE_7j=tty zdE)j!77iKAcWcM<;{)(h%l<5$K3rzWRt)pxC{iPEfoVRn# z)ERuacoWlViKfzYE&Ehm#|=leuu<_QIwh`UrO_t36|U#gb?fQ;b0zKjZsqX5*E4z9 zCVstU&!DLeY}(t0FKidE*SxKay1!a-YaNH%IPlRg!3F1+aaG$@9N1zdo7isQRvRZ; z?e!K-U9`d6RV@Fxo<38T@?|qGcG$jxGn#MU5dT%IT4e*D8m?f4^y<6eOE@&do8Och 
z_lLRjo7W-^x#G?!mkk^k>`Wg!!9r>)zYG=~C%}6xI)Nu~l8^m zu&&JR=dTw{=T8U|-UrkBVlW#e`?Dxx8$)HjG(_|!d(lb<9FZB(K$&-6_MpY-AU^Ia z`hBEm>f7Da>4 zik9JSwCw1~9~CYfwr3Sz83{&tw}H==tYYt0&h+d%gKJ~A^5`Pb)9w8kW8lIP(bH;) zMs`zXWO{No4O+M}^Pm?`j0$AbQhye{4dN@|Miqh&LI#Q!b3c&AGWR$wcCGv7&bzZ# zbL~YpZv7^oZ5TP1ADRcVOfa*J*mc2G^hqyY?)c=y9u?bpI&?FS-&)7?iDJi~&1~Pp znR&vkekaL{M>NcXqVfG4wvKg^-Dr~M%nuFb@Y7ZYJ`CQ#&x4&9dSw$Y3|qu*ZP(N9 zi6b#$9Z#-YPK;g1$BqkmG-4Uwbam$l6IbrMvVeR0F5#laZuC62l1-mYV}}iMrS1=9 zot+zLvUoEc|K7yYNy|8|={g?&I*0R8XL0EZ2daP-tnv3oHd;HK{mq^E+haXVR}oykdYtGW2xRF3Stl3xACaP#P4jJ>gt5xPQ=YfH%S3nAt(ZlFiz}HEwt_3fHcjA6 zI$T@D`Jqc$KYbA!++Iapn+kK4%GPk@luvu-NwecO@h+|PecjlFpR{~ zw~;t8Yab$V4&dU{gD?z>g5}c#=y&-rA}tQUQ9lY1laAoZ!YCa1atK529f4c@!9H#*Fk6m|UEMrT+2w%Q*=ne`wI;jJ*CX?8LyQY!}U*2Tc6VJyzfibIu^(Xi9Rz`9Q~ zg8Yu7+nN~U4L^!OV`A`TS2SWz#bNK-XrxSxh5m^XC^L$|;fu#GZ`pC!tULjaWzqP0 z_b37);-K}3!NEC4(KGi1`~ze0@0%DTRgcA$5ph^I`8fV|i@}4W6Br#Di>4+q2%Z#! zTi@a^_E`*adc+}V$Wi3D#lq%e6q^2uh1wy_rN=P5RUCXj#^U{w7_4I~Oa~vud&f9jxf=(SA$FI>!uOr{ zBH$QWtc%91{>KrgJBIvIu^1O|0#DDy!em1%sy~lIvsovw>}~=M?>LTY?~b7`FCI0= z#3S%G z=o`tH_@4%I*CpdpwB2G0(z@?cQguh6^%2&zwGhTxkr;<=4*Dh|7j0joV z{9Fn`+G!CGl#Ezw4Fbwfz_GCwUlKKF;H$-r%6PmlNW!Pe1l%~C44XJDOpP_ToS%X` ztvq9J8r%n^p!y~)Ei87YzAUzR=3Z{j+#wU&@x$zn`1RFXsLrLOhKnHsc3{0T$zxJ$FfejcPcz@-9aAuDx(+8kW?v`ElPLrYT)x>=>+f?Eo*@98kvI2E@OlX2NR4S8L)cq{fhjZH`4 zRW1AibQmYLdkxjWPHJEGHK}lTE$e=kHMXW=VooZC8S7BjDIIm%reXd^S@T{h>d4t{ zt(AtNCpx?xm4-&YQn14=6$8q(aP6+c`QhoXx|WJgr&7_rJWc$Qf_~z&Rb9nj{Ziq3 zG7arLQnB`k7R}<5fpj^i1TFr1n2g=#8nix|g2M|mxYH#W!}QZ|T%EykBRTu2IdBh6 z#)lOeG@h4)Z;naGH%NrhX)QiBO~5V>4X%7U0jt6!G?<-?=;1zO|)=$B|f~Egwlrv_&E3( z7M)B+qIEKCV>FmIB?%ilr=ef91Qgn9u==VFOTyAn)=Z1tXOocjUosl^NyX>kiTE)? 
zhc4Zd&`#`o-6RPEyTv0>YPk719U50?Q1to~j9fJMoRxwBePv&blW=WNGM08t#CPWt z_%3veXIUSGKi^;J26^~i_H5fEKRs5HN zWp+u}UM}{wO@eI=EykWrgwwyrVOBR0t*$4df8=otJCK0y`;O!Nw|I0pmWc4ecsQMp z$7lTnjQx>_LY>qcsU02r#ADcp1bF?3!y3bQY<`r8J2h`$$ewiQ$=U9`kcM`pI{1gk zde`zW?OZzc)lS9uw^@kobqz;8=i|zQ6mWVbipB2B6Se3dwZ%oQ@mQFI+Eyu;5tEDu zA5Oqvwhqr4C1PN+MAVjkJyv?MHX{WE%`}KRq(S~p$*asn%>A5)9g@eLq(=0Y-!-_W zLETpx96Y7Nxq&IT@Jfq^qjfkuF%{JoXkqNHML}B~COu2Srkq3s*G@pSh!ixLBz5Ik zGL-c3cTxu(#di@B=QTT~z_6_bXUgQhf@I`TgP9{VsJ$Q&UxGB~&^Z;kYZFmLYWZmU zWZ0Kng#W@^^iewWl9-6zpoOD%0)AB_;&DY1YV1pbE<6!?Hc7qErl5U89d7-bh)=(g z@zTJp)DXDWQ8z6J%!-UV;`zM6vRWvM8yrop)`iFmn9gU63G2s@@ht1byh zT#}4yQAubjvABFuDuzhiNs3HHjfaUC(OiRO&&9t1$v9a}@_J1QF5ZyZ?QHQx242wO^f$?CsR1?rO~I1aiRk%U zgCh|dX!KGLl$r=%$xFTAI_%k#hJ!X5{HrvmIy@Cy^mSrZ4M%0k_2v5Pe&072^PC@V{xqnD1 zt}5~6-ZX^l%*3wD3>@8@iV0t|i0YhyWnWUzYeO0qU64MrPlF(B1`MUn)?6w%|3-^v ziK)2kl8zPy(ql@sNSvlagJ04Mo9IxwISc(~YO&*z7RCRip{?}B$zCZKJ2)M4L(*__ zLl(RaYq3IV^s;gttmN7-=>s8K(=csDI{ws5!|sl07#6HWNbeNfIGlm$#_9NCsKa1e zE$q5xVzQg`Q|Z;dW76>Ab{hVTk$8(t!GafBm~2i*fhGmb#h#q4k}G1HMV1aPjWuxS znS#BG(lP!)8e%FnFbvnApmq|@SJB~^)T8uST8uB$!roGcX|>bw+CYny8#RdPU4Z&h zZ;HF6U}C3u40)k}Q9ue_NxvvMn1R!0G-xk^-y`jTK8|jPEo7cTa!`=IWM<%4e zGc*-LA0%PTUlQZHbQrKpa`^ds>>jSgk&buaXPtr812y1-bQqSWz-DbS_SBTUj!4CQ z*<3I7)6JJK?B=@wamZrlidmTzj@(|Bd z4Ei?}-s7ad%lYN3Oh>ZxU$0R*RQ6BDo4aXn5&N2Jw78h5g}Yrk-U)U}+?s)c>Z$l? 
zn2M&`ve0`^2H>6wy`DMHm-txfDEP%N4M|-!*c+7rFX^!XMLMi+nTA(qwU~b)8CLC5 zkzSdCar)`VpOTKz-*o8QC>`r0*OE+9ktDe^^-DUU{4z0L&itSMGBHT}dr~mR_3Sh( z>6C#Qhf{H>NY8s_Dp(`kYf&>Z4$&v0Kt{Yq1vHA=u-%>?t<` zZ9b*pMszwN?X+0@CuH-sxLo{o^{jE@pu-}|2~O-o=S}$pNWm1(~$F7 zVxxH`;>=HCW9^f;y(=3x;sxg^EgD#+q5*KG+ ziqt*dc-gna{XD6y?*C=rNaIX|Smk4z#8)k(L*G*H*PwKKoRo^^>8Tj)BAok`4xh}m z@c5%e>ti}pA1-(bT0}_w>DpKa=aCwW6HK8uM2B>zRG_!iNWpV|Jkk*?_|ASyDr%>t z;eC!^@i{tN-jRya&Beb-sW{v#9aRcbaraUd_V}eDrgavg9Mj<;eJydF#M3h^rsrhh z)zvJhYiY>6sezv4XWOx<_=Yqz7@r~enua?arBB>T!qXxxUdC$iri#}_=D+albM5^?FI8(6+B!|{A8Pf>gQAO->MYU49>*?+4~^zkGh_Qd`e!# z>R=F+iUwCwF)B0*x$V<1TJqLjYR5;ZB{L;=);eY4n2Xe&=nPnz7P02*eZDb z%8X1DndV@eTLyO5klr~`aw=MfJ8yMpX_o~X$&K07PNQg=bTZDMaAN9Ne|aLVTl4ytYlp zjfeuAlzeHVPvIXHW)0KK;4BDO;gPRZ+YU$Ss%bspC5K8X)< zZ!4#4=_?sHrOU#xA$hnWx!zIkz5X}@rVGzteOewaF1&@1zw_|(ayGQT^Woek509l5 z3~ii`6`KmN`o~%Hh&qc0Gf!b`c^1Z=KZQLKJ2!iu#`Hc}=vw0>Op;Gw`RW4v_>zZX zF}cu{oWjVe8`@9Bxs7#s~8-uWCM0C*;C*MlP1$E5yQ&&7>nxhT`+q098sNYp!pP1i0Vd|x5Vr;Gob z&!I`!X*BY>ilz{jEv8tQ}}7vCky8sUx4D4c}Q4%8m+q*V7z?JuBLVIEePoJHV?Ggy~<2JQYni$O8@sG}}o+~hMjSau#We_h1p z>F2PvxDeMjT*c3UXK*f}5N&D~VtUQ1NQ^oMlM!byZto?;*1m$ZU9Vu~g!9<8>Iy#1 zy$sX0m!aSN68_$Q22-pr!0p@x80cL=+{UXoIr$Po^$SsD_jMR0UBa(_Z{g~!CwNqT z34eKA#D*sSBKG!em^fa>fUs*QKXD0GH_l_g$bX>!&s7+fUPofqH5{CD6$!(yVNL2a zRPTBl)<^HcaP$p4O}z{Aynk@i><;D}zmKlz4`6iV9&QvqN74Ox zTpwe##VhP=_8bL$pTKF!Lwt;Qj^sOckP`O_`}05JSJn%(e)|?JzCV@NeTQ~SUt|2r zx436%tmb6Cf$d)(FfZdN?k#?c6JK89cT6!lReO!8*9Cx_5O+SPelj_ zFGJgXU$Jy@5nNo0kX`l>J*yPKWNtCqANvNqk3TW+-WME;E5pw3-%(cO1Aa^`Ld}@3 z=u=*f@h+uE*0T?dVr*_AG{Z z`(jw@%QN;Bq1TRY*nI30T$6udxmgj)KNq2X(Rb)+OE9%p32b}(fY0w@xW4}a``TYo z*83xT{fdx!rWj6Ve!)4Q5>~B#qSXikb!FlYcsQ5B^?5P+#(u*?>(AKmq!a-&OEJLt z5ANm`<7Z+iDj)yBw=<>4@-tAgzkJ8A`NbG@;XBZ!6qoOnqFvY@JepX6-$VYudd6>b z-uerTK7T}yZhETZ&@WtZ(N`|wFU#g%P+B~`i|g6 z#Tdh1xa#^7cZd8$6@v<#+E#)|jmuGgu%7yU`UhsaRpPm&f$FySH)@sr!nqEW=sQSH zZO~MRmLX@o=Qk$)t*8Fzm*eWtN^n*sYS#RXgSv7Q-Ts3=xs`b2Q;s%%da9j`q1xEg zP}%!b;%O~?)$&>?j-NMEUvB+EW}gxy_cT@izAs0YHU?_=nsW4s(pUSg=&5E$D-m&_ 
z5>t~);e7f#v>j#tP0P^uas_f6N}=CTUtM2spi&y>tEty2;m}E64H=}b8d#L$#4tm} z&3dZ(=pT5oQ%`+tQ4Wp%FGR_{{Tj*|PL}FFS3{-WL|<(iudlWo*H^Dblwz@0Ikq0H zKv}M#dM&ngs#1yO)eTfwyE4?7VW8?2|H0O?mFRb@5^Aimy5Gu7nUtET%3daFNv|qu zca)i$U2deZTbe45Q~FB(q@fD&H&Syun<#H9ef8v%fjYd>K$)&GQ!4_DmD@E_^j5wspZYh)W6Hj)vkVKD)O3YbI*$6eAVV&QvWw zrKhs*nW_<4#%jO;1NBF~(!)&D?v6%k&Rso~;bE-CWErYp|)PzrZs;`Zax}R>OQo0+d#Yat4C8;$y68k188q4Yow|#l^Ez?rtAXsRr?Pn%0F0Mhg4$LPkrTepj>*Xp{nxJK()PXphiEaK!*ea zb$zLkS~c5HJ!xU863!Sa&#`*SxI+9Cq^HV^%Q3l)k(!!ss@6X=Rq>%lYJPP+wST#h zI+9?jBC8pwMW2n-sT)SBV}yaS3^P?oG*VM)n<&jZL*+i&Ty^VWp&Y*Gt4KF9^~B0p zMISL!FWZ``D#7|{z)(Xq=7YZ4`nSG1{K-gNF)~)IK3S*+uZ>iPH+t%QcR91(MruJr zBb6Rtq^cBJsMRM7m9@@TnQb&x7jKy;7xC@(cg8BMsi_+4V5C;t87e0O6V-aVk$N`U zSe@J_^>~b>8gFN$TAVdilbRW-LG~tUZEI7tSAM4rFj0@?o)u~4s(N#C^{s`8s(;o@ z`G`MuZ!uTRT+NhuUKQo=pMl!@$Utd68mV4crt0h}iGx{HRjpDZb!eG^iYhZuFDDtP z_Km8j3==bTyS;(h=4+zn$Ug5xo2sm~CTfO>v8uMlOl`hms$%-dUj~dYRLTBg(;Y(< zmTjtDWQ*-F`fB3RTIzbanfm#krP}LnrfdTYR6siuRhDF|R1-55G~HAUH8fR=_ZX?$ ztBsVYmx=mbTlT-*T=ni|r2g})qQ*BcR5Q&CmFEK!)qaqX`g@6~I#kC{otSH+dL|mG zdku_LAA_pOs$6PJ19N3{%tVb@V5oe=r%|gcRI}X%>SB(8dXizPrvI`~yXDLy{}`#W zV-1x{fteZ~X{sXL7^|johV|YXsutp-e}auv@oY==aCKF+6Me@fT@}KxT&gY zkZG(|Uo}&W+Zd}|ht1TGLx$?7wTY_L*HGPew@`BnO;l-M71c(3kbA{K>BU;8u-8@8 zq4}n2bvtu)p5|(&&P)~7HC8d}&D5Rh#%k&bW3@5OLYW*kR`W&~Dchx{YFLVy>h+(g zGO#dFw;P$Nb~VgYd7RXby;W7>+bU{PD`WNaiiw&y(p+_(YNm!aF;^#dnyMN>Rn>08 zs!H>thPpRXa#;M;_lcQWxS*=qrOegiTod)zJ~O52)K^b}tEmsSC2yaaD@Q|1HR-4P zFC1qr)r@=twbR2;9h<4I!sSe5D5YwQFjLp38LL-&%#>xif%@{mLiLhoElxL4!)I1e zf3}z??HvoXDa=gW9crqw*Hl$>G*SCwjMd1a7V2_PHFcw^nTn`uqUOt)T^?9P)w7q_ zxNolX9-FJRjm*^d&NbDqe=StcaC0@qy_&k8Z>AcUo2$9?{!)F5EY+A>=BjD3h1%S^ zw))bnhMGU#Sk-A(MWx<0S7&17`bB2yg4R-viZfGvE?cO~{Su4z=E~`dnVR$3T#c-j}Qr!uzt}Ml_P@TC-)BE4E zEmen@TFNA-wz^!ux|--*RoNO;Q>%5B>gABC>g3$&D(s7;I(ytgo&0X0N?(|(kVFgh zFvV0kY_L@Hsjl=YYpKMU<_e~Dl~ZCB6=7RjHMwh{^jn*$E?+E^Q*kxb^?Yr$(z1p+ 
zdb+Awx~94^E~ut@->jip|EQ{dh<%|Z7V2%&Dr#JCRW)QwRdpu4mbxmnc=%USl)pe_;9BNlr+pkwu`xn$v?&4zyv7>pVnYz-;QW@5)qQ**n85vt!*-DM>aoZzmS4AsL=#%lZmW0j<5tSW|9Rgq$c-UwsWJX>FRH#b&YrWmV?e0{ZI zp!B+MBenLFp;|ci5BA;p30rr$wwvITai!>FV5Axi`-(xk{$O03o|@&Xr+jP#KfeEj z4qLy;{Qn0|c$eVRfp@rM^9iSpmB6y+SJXXhu9{E#iXGd(z#2ukdHV~RCjY?li6scS z{1ul5y@zARBCN^zfQr%Y@yO*9GLC+O(}1s-E!cHKt@pV5`UA51f57iuf@yO~FyUee zZv0gSr>mcFf5#i-B$Z(L)$eFkrv%3;E71NyDQ?~VfjMHQU+eFfTIUZo-Y>=7s^xeV zEqiHKjQvri_?=pYLjk{0;q()0<4X{iQi24N3fxI5g^x`sO7DL`7o$=%dRm0QR==>Y zUl|^!mB8_^zLL{N??@wc@0gxiRaA=6r4@LKGGuQo#@2QQYR&TR(EL+^-|@eN6aR*d z-dChQ`3ZZk5}ZseMnCI6=-2%>VkJIxf-}pc*V``CSC;z?)ai@m@SItREyLy7et*zL zuymwN38DpiIq&|7I);+xa}3n+)N+^$t}Q-lsOGmUg=TI!)*bwfFOJ`_Al6*XPA$UN z6~*ZD@dtLj)K?wG>Z`~8#;WCZ>C0`(ainW8^t=5=e06|;{G*IJphU(vpVp!IZ`u?}MdfwJp{VN!uq1HkrnAA|tll4^diF< z%d46S=~97})&?qJ><>7(8LBIT4U~VhnL2mVROK`(N9Xbim@N2)tiOKYU~C22&i;ia zFMeWj&{s?>FF}!I8BVsZ#K=y+u=tp%I-&cC8)JTB#mO=(U-AnkL4T0-s1(}ndg@HG zO86#y#nU~-u%E7{%A>!d*>^p4-?tJSUjKl_3cg)P>$wX43vJEzMAp;2kKq=i4Dfa z%C3|2mh?(Ayl$vo>Km&-;UH}U=Pf-am}hteS{CT3WWn}f68GDLqx32J1DgYSs`pdD z^Yi~;-VEtk5}Rw^N?jUlpx(cda~8~=tua!A5{%WbL6wO5qNf_IFj3R0#+A!~l&SeHLI{NNXc6#Paj z{FeE)p}PK;@QXAPwR3O<&dHt!U)NXd-dm`T9wzFF}5$Svl zA|(h4sC3OdbR82DMaB9m7Gie?1}b*Y-QC@d3MvM6Vs|(C+ut92c-;H$?#!8U&g@>` zUADeTnN?m^Vab10*=J2t4{lnpy&w9sd6O;J6$2|~ zr)SG1IrC%NEZO@{=B&p4gET-%E35m1D+AvQ60$A2UV@7OdYhLnabivcUoU*p6Xl?7%HkroP6QEu21p70At) zQHB+3n`O?l0tT?Z_Lgk(Ict`&(vt0YG?1004q#L3`?I5t=Iq-qOLjzR&PMMvWyawH zSkW6Eqqhyq{bj+dUJhmLQ(f33$q-h*(wbeVAIxMb9_;t>5zP7lF}vNPS%}dn=D&Ib z3k#pf=B*#is=LOpr+>z>@9)R31WT!B z?CXxCPhOI59tWJ67=0ojsZ6%xa5=um&v;RsgQ-Q-LG9aKWA(Ep}xg1#Ya^!;yVgbYPkR zcI=3?2g^C-%+4hO>lqt`^146=K!nM1g0u6K(+@9?djbRTbyRa3Hc>Knmtl^(K z3+MCq+~mfbOWc`Ko)a5m?#?u}IkHQSY_tp7r*3V!i_%+2UYNb|=V{?dJ13 zEOB8kN4c{V4;-0Yj4kuu>cn2nbzq|wIa!tiW6KbBevvKP+h)s_o^)W|-9y=oKXxqF(~iBBIWy-k4oo=Io_%@Z$ov}Yn2)j@ zyB9f##g4ILA^JmE|8e%r>k{YrcN-Qz!HNy|=FIy48_XhhS~FjF8}`iDmK97M%;xf+ zlkN^+lZM%{jsX_zeyJ@xdef41_9v)8SI 
zS%cePmifkp4QR4vtE>m|HR@pYrrVDB^D+Ju3}rpW+p-jAduFl9gQdM2%-mJo*a_t! zEXc;5t>|!KWwDOT#MGG;2wmC5S&r=42xNOcPh-!beV9ahB73%QJRAFc8aq61Iy?H> zhmCtSk6C`3!_;m}XZqV0vVghMS>yoY74-^6z0tzbLnu3?TB*0bI| zd$=Yxv0YgQ*w3Cptp4wQR{H!9%Pif`1~wmNc<2ywl^B(=j$B@&tRH)MjS0_0vxHHx41!YFmy|Si{Gt|`bSq=`&psHeqKRT{9Xvfj4WCc% zgXE)%E`PPLcZf1BOI1bduHG0oSe-u~Ljy-XRm0-1N?0;U1@)gP;;lU@IP94+cJ=%V zt?Q)xS+i>V8TU%))L?=`#%SQvIRdA58 zLl3)7>EYY)diZg%7V4f>$CNxNUZ1IjANKXd>Oc{mc%h8324WoXOam`keTKZbFJOf8 zC3yIu9?ZM1!JOY6(0J}VI7DOq53GLm0UYPPhkd7S!qs^nAjA3sB!%1o zKf6otC8+^&GaF#h;bs_hq5--W*TAnU^{{G89oXtN!q)71$QjuLhcDMaSXnbXuelC0 z1uZb`>_vDttrq64XomE{Vu&fL0J&KuobRZD#L#LeIadj_=}izlCl7r3*TI5}N|0Zu z1=sLOC|z3xE%Is@Us49+gDOFH+&MTjumB$0SA$1kH8id%gN*rYFm_KKShm)I^@e&d zG--g3{#9_|WEGh9X@-&wRWPoy5*GJwglk8uAyZfa$3tqt`U-T?+UEg*a)qH2I%#- zlz(z+4dhO#fc_!pA#qD7yf?T6b@}CRZ(b=}y;})Zn&l9i)B^2k<#6JADd_Av2m4E^ zLDi!e?y2(m#TQ_$Ya^I0tB2fQmq2i?6fT!kf`&>9xO{4WCpispN1+kI4xNK}r|Mz- zzK6gpnM#p^kM>`>hF%dbU79XB~uHY=J#3O<-Ev2&(%U z!R~njjH{`Gh7)yQc)JROXK%nZk9-)bT?h9HtKj|L7C6pxZoE(lUaP9X-meL&tLi~g zPzgDHweZ`j7Jgl50R8LLpq*9&ngNY4U_>hvb(X?dk80>Os})-2)j-ff&ifZta4M-4 zzFP3VxZ`z5ak&US>*_&uZ5ibEZiGXn5Y&ocW#6=ODf^&wknvyYqf7+2^0)2hmFdO5OKQzg6iwwSAGSg z#8yF;TN&I`D}nKU>fpOU75H|PLf6#_7{RrYS6T%oUn^jzN+EbwRl^kDO6XWy4rh*) zLdf|txUb&?nO&K1cXJv1x1kC82Ij+RuR?f`n+I+FoClMNVRLB>6mH?VA5jd=nw3x# zTMkpV)j+Syr7)TwYg220G3#<+QcE+ucv=A`GfE+Ne<9$D3YZv_2d>o_U~#7q41G)B zs75YqU6={RyK-Qic^({U%!ZT$xuDtf6ds(<1M727Kzua=f-h%+Fx1u;W*H{Wp>*K-yYYwy?$bokcGC;*M7ot3p zL2GLoIOHb7eXC5ciB5*snOU&OCmkN;C4okGE-c!d3}li8N&BjyR+b9ssmU-}ITIF) zOM)Y_vtg8R3>-U{3cs$UL-mv*m{pwvXES4<#3Tb!T@t{=I~oRGP6zSecu0Ja2bT-u zpy5#@*abv^+&mp#jf;bS38{cpnNYAf1GZj`f?q1RU@|Qc!XBl8esT=>?v4RD&-HS2 z5`1BNu63!f`ei1x{+A8%lw43$jf2~&iSX`NIv9?MgEG|=sQ*<0XFuh^*wLAA=tdS; z#OFeyZ5~v=O5ysAfV6pW@OD84T-Jz!oQJV6eQqp#*%b@3Dq|sjc{u!Rj{>L7Y2fiR z4$js`f=x;stQ;E)Yir^mO(7Eg)P_OR<|O#2QUrCQQ^90IGWeZH21ny$c=jR`p6aB) ztBH*p|(m<+St z#e-NO6>PW1fkSK}^ec&hnJ;7E+lT~s-jB!gEfy3l5}?C4g~yZup$csb|5B*!FZxswDslha{Md>mZ38Vy54QLr(B&+#}0 
zJkBKnyB@)NWh7`!jfMk*qM>nPJZwIh2r5q!VX{jSc$Xx?lWjbPli|=kFcB=PQsJ#r z1SoP07hx3Kaw&lN_*6)+&w^2!Nih3YEX+R?4JUVoL9u&0uv^hEd`28}IVVCvP9k`p zje|79cu*Rc08=X>Veynm(DX=zXG+PiG$9@?Rz*VJehJXaFBPI=Q=#--BzzS|fT&Lt zjNTaq5q$h5nh~)5Ya(nA$HTv)(U5r{936Q6l1m{f>V5lS-68^@6OHeW>E5w7%o*0lmi-TjAqoAZS64nk+gh+>Y zIQB0U%41`peQ6RL5=6jg#V{UwGJGEt3n$OUL$hxv7^H=Q^D|!mSBbC|WQ zf-AF9;OFpI*f2g0u7*dzy8jZOa9avwj!uH-?MYzJngUAYDR4C<1$G#u!nCpJpb$|F zk*i}N{B<0>AD9mBOtWFegETPt5eGAeq(Y1^g?oMmXwS|7ozi%y%8G?TKK78YiE#N< zD(nePfch8luy<7gJgSd^6Is!)X@3kX9T)?Ofze=PoCt5~)1mj%B-r~b6Sm7^;p4nS z5L@s#zC=Ozf;2Eb69dt@G4Q=M86I~=!>@>VxcW8$`jjTZ1kVIe&*O9cj)ZqTl3>u< zc*tprgXL}suxD>PsCXwr^Q%~J4vK>|y%=cx8V_ELNl>O756`|NLsDf59Qqv(r!ObK z(m8S9my`jX%E|D)IsqPDOone3Q4nw>8txk9fy>)O7`ikZ@-`+zFIf_7-4Y8@C9bD` zJU_iecH z2V&v=FV2~#@%%6D6Tnk38t(Tv3-`w-z?|dJAmBVn<#QJe&wvqa91rewx}jVH?TJw9 z8wazrVqm34DokG-1KER8%3G$ z%_d|(z6Ol}<@Mo^QWy(o;!{AlIv$2C z{J&1(;QX6dND;fbc=#Hf3(iH+;FFX9A^W)>t&D_D%|y6yKNjo;r9jK5O!()N2GxqO zuqGh|yu@&-miyiFF^*(E@ReIgikW^x-(3TSmU61o%%fTd&ok;?Frz8kop9&Wa#X-}|4ESi94khn$VO(7@e0h@vjyG~( z{-2~_IWN0{+R;e@%+;wlKDDibuNhC6~Th=LP$<1fZ1V{@MT^R z%uOf;@#1PIOUZ%GbvZC`Kqfzb9RJgk$8?M%rKm^|_NxO@nye$#p#wCN%wluivl?-nha^Xi>8I%>2fa~BQ=+A5P zJ3kW!Kg)+AEPCs0cAd!u!Z-Y|1#piwKE;gg(gAqSnltylb(6gKGU7~VfEX2id zV{tji78SyYkX-OI3WpJNLr^tyFmK$k&s{vf-vv9%#p9!D*XB zc+WK!zd9QfQ_{g^ZW{OoCBf!3xiEKNHY8=GKodXVeIA_Q zx>Qxp0n;SzXKh7LyP^tw{PRJzDIa?9wROggTzI}e2keLD!1c3faC&M9{Hn-<5u78h zEVH1jyZ~;GNrc6#6JZ>W*F!fC+@i~1!@M-uc_$Cf^~{IKz4KsdZ6>@}m<2Wy%E8F5 z2+B5b+?>l`@QFfLKC%o)^U}IYjuDLRC&3_`N6rqh*a? 
z^S%K_CDg%=$5#QeYT(H72B>mq09U^T7!Y3zD#}%0S6Kxy^UC1i&KhuvtpV@P<)CR< z1L=2~L3_jnu&!-|(=DZ-%i}ozs0K3sRl~^D4KVXcIap6?f*wbk;mZH_qIu2m*sBGm zEUkdz<1MiLUlrU-Z-pDp^)SY#1+4egLjvCiz28#_k_UC5F1ictVdp^LPy<;_%`nlt z0(_trVt3bq(7YLXL{&rLs9HEPwG(n9&%t@mR;b^54x)-1z*wgRf}8m`#Vzn(b_*Q( z!0#)}sDY>}M5@xI}gtZGQ0TUYGb$tV@pIQbtB1@rT~Sx4`=ot>97A#P{?i zP@jJRj>mSwtC%YIYY=IgH(=(th=&K>9B z%>0Y6M6C_-`?Nvm{&wgP@jd^o3jl8$z)QaZGWq`3gx?>U_^}M^c@76X8er9dHrOA0 z0W^kGLBqH<*sR+Cy(_99Dy#uK-c~~A=O)-DuYvCe>OeZM7G{{9hZ~#iMNlYhp9}Qa%TU-`gPhOgsEM*8vA!R>I&H zP0;W21z7c<4K7JLA?RllIPrOEnwntYuXcE4{t{k?bwJXrPRJ5>LXRcq;DOggSQmB$ zo;%dSSeF)9$YVD9Tn}t$Gvx1UhM@O%AUEzJ;DdW`?Meq&y={dD#q|K|YoN@k5&Zn? zL6&h194?-NO;rtWGPxNhZ*PY;hdRLX+-*qSdmW~pzXVAi`G0p#c?^=??O^470Y=?^ z08{Ea;mDyIFn{NB=?tT_G{dRD+YqgG3sS9aLBx_rVC4M< z?wx)Dc)APR3p?SC^cs}@cnn76S743pO;Fn32_c$~pMi0La*2QTWJcmiWao6yd5UBeHw)Ig!?fbvrFjK;` zw;w?wP{-kxN?2L^10H?+2fg)r^5>^2AjYVonx!H>xbqX%%~QbMDn9`1|HA&jo;d97 z4`_)022LOTLC8V@CZ1HrMDZ8!-trrw?tFrrK|N6T^(VY_`3h|CFPL+=2R?tVfJ#+8 z@TY1|#3w4aP|yqaCH291Y44$=LkU+r{sAv~|AZoQWjwFh8^>(d!S!Ezq0d_h&aUl^ zB z{=0#*s+c`ehzdKUxG`A|?>ZP^O@{>CuNmO`-o5c{wSH$p2O*Y}m0sB%^d*Z$E$ zr9b?41_7EFB2h*Z^~QD2d*SqR+8983NOQFDx|0F^%}_(;BE&les#qJ%^EB4O_es6+ z?{YE6Sd7!XH8FCBE(Rs4h0&hiwM<4Qq{&nj<@FuEj+kf7hgu`p+k{6mWQg~+ENt^8POLtqyn6O zQw=?x)KFPb1>=i);R=~D-rKK=t(|@OGiD_iQL2VZZx~`?BY%&R1c&reL1QH?tR10< zF*8JH;i->?S_U}igAp1g3`UDA8>|jD#c+On*s7!W0*@og5Do8{;E}z?Xs<2Cut)l+AEtq;RCRIJ zL^H%={m^ip9#$+i!K*6=V%rKcRC;2F?^FzM^W=dz*;pG*?h0^fg$(bes^HSqnt0%v zA@25)V8Iw;H0YzwIck7unOZp2Tn*Q4GsgRa^l+(_7G^)!#ElLTtUE78yCq`Oj?~Ap zTxEPWSAdg>CHPS&!a7rq9%Y`oSFy-oBn zrox6lUtWw^8>Be$k`eY&?1w+@X<*;deyBU4FYcV6iNnpb@$0(YnAlT-S(-AO5Uqzr zL!|gs*#MJN4bjG0g1J#vm>$&+>n&Ar{R=VHudG662^*2Ua1Ys`N#1S|j9VcTH? z)QT}g#od

W(h>IjX{QnkdHK+D`ar)L=AKvc*Z+<~aR@2vsX=QDKV-`bJsc z_d1T5x*3L)8DgBB1@3z^08e}wh?R3q@L9PLrmwNaryq@QueCZx-Zw$3d_(+r#|j6$ zwZ!YQT+l9dhHIqe*u}N< zG)@=GN7&*1gGT5)bpYC)H^(|A#O&_==>EnUg9HO`(kTaA^lSj$X&iv7GYs*XmKAOe zu*TU3&Cua%f0ULvV`i`v^_J@MUgL)MqipbzmNiNe^>Fjr{+Oj>h~Ik}qBLPNn(6gL z+lfQ4?-M4ak!s7vh4%$#d#P0e0E1{e(Z}&Puk<59qyNzKRnkV|w@yC{dT58sY5}jU zcEeJSSvaeg8wR#mq2h2yyj(dMgHBlC=YE{)zZp)}1l*rSIC?nYD~+-EJ!1%3A9TRf zBW7s%a5y%KC*qw^uBdut5?Se8G&zBk3+-uVQ3!XfD03b;gz_FxFN(IFPS-E@|G*ctfFdL&-wc~lN_$6_Nld>1nU6`pwD@dxgB&dv*6KU?FCy0K`!(H)bj?2xB} z<6=GWivL*jK0AunVKO#ze2%&~qeDNy%@Zczy?%rZQCrcb#0#HRjKuKM|KWchN22rl zX*l}%L=3&S9?ipM;Ool_jSHvYmLkAedq?A}X&$KkcnSu#PshF^H{yu1BXLC8T&yyg zisg&Eaq=;TY29fd|3}+khSsTXEnAAz?g`r$?AdARWz!wnbh zG2rYP{5xqO?(y@%J*HFeZqPhzxj7eCW%*!c_;$2kH5C`zd1A{g9~7OLk2@SzV`KM3 zl!kcW(&DkG*R}-TmrX<0_{9i9Uo2IfiI!afD7ZZz16=&^{oxfjt!f^goUs;PJX(Mz z(|4mxc`mk=1)##P06g#Ik4ySaK%2wEao^WT=(1rZ-u~`~y%GVFetF^4YqK%7!WYYT z&O*)QOEJ}JIre!l5y!rsk2A_P;3`~+!+y@kYSpQjowXGsm;2$~_v0~7cODMUUx9T$ z_u;suJ9rKL!%O=oW0}z`yyQI-SL|Pi3qLPI*M=!*`+hq9{mxM1z(x$7x)D=bSL4#5 zyK$Oy8g3jj8!M{Tp>5Yn+_gFYGrldvj%z{q;rBG|XTJDi{9NpLXf_@`vK>J%y{q%kh2O7Ss-yi?ic*;Jp%j$sdEy z|1CtpDjr+sdbFzi4=*ckLYKE|QK@|;mWHgtOP;>?H)sz2nzjRf9h!=N3zwqm%1wOT zzX8VtEXBn}Yw$6z`#G&?_;9#4?_--W|L|&x^)|5lb;beiD064o2&db8x{yM0bcK+ zb*ty1yl^cJxHkvu?0vB7@M`SQu>`I5_~NOUmE6DPqtc*-s4{py{>oW_w}&3Z63(~O zuElsXY%xyQx*i+7cH`tT2T}8NJm$RJi_UWnqrSs-JUDbG##~&0uWznJ^NM|VUpf*C z)ED9WGJm{z;|ylr+KnTdcVjTeZQ-snIP1nXw5(li~LI$K$^n`_QZBevI%4 z#*;jrKDPT&apP{x5p71RH{tmF?Fn4m;}q&`<2d{~fnVc~q5Jd0_(S<1_6%H!iy!@m z*YkpLSJ)9eDA<92;$!eo!g0K`VGAad9KdfjVR*MG6zBBWk5Qssyl*bYD_29Y`Px?W zkKTf-$_`-glyvm!+J{Hi2I1j~Q)uS76Fc2^N^a)Rb#`Kp_4IsKU>i_aj6u#|?i1aPi9u{2a9x9jv1< z;o1s3>9HKWM)EwqC1GaOKCE3)hLdDxO2z}tj`TaDeOb>-Bj!?j>gQHp}7CSDNOe}fjt`I z@pV`X-d+%dJNqT$px88ARec2Qw(r4Xw}P?rZzdiK55koki-BiSaWux^`LtwI>`uV@ zSK{zc#6cv(!{|A_8I7{fV)FT2DCInOwc!l<_#H#9@(`@gPr;TR;ka#8JWlF9gWHv2 zFiCb21sBht@4Mr8v7#P9Cj@`|+Jv@_`B?fg2(9!^;G(8aQe#PU*kKs6|DjKV~{;$f!u?H1$v+=M& 
zBF?J~Mtsy9`490Kr1RR+VhNa$V$nRC+*}9V$crO84-esWW z+T*yxJrYNb%tEV-vv_)DEGimCV_91qrp^k-X;+WqhwyM*8F&&m)J5Uu4;h$Jd30GeTuH&L>ghPQH5JERD#T0*!z=I8up}}Qjho^zAoMiuoDz?l(_`^xQ3iVL z$iffOqd0W)8T>Ld8E=L~;;(r{7<9V^mmN>XX2T$SmDY(fFCWI$BSZ0y=NW8T7J*w# z!f?ArK02smp&MMnQQ~^sl6eQs&XnPw2bH*ZPcr7uIfb`B#-O8H0*0Kc#@SemgXfgs zu+B#)Yi`DseT%T`c{ctzT!=45T|l+gV*DvcL1iARYf?7m-fBeWP4`i;rXCw@V=;F@ zJhlyLLTC4LnC~8r*BmOb+vx+AOD^Ky0b26oeG1TVb{6{dZN|FDdQ^Fri4W6S@qOS` zTx@*-o5D(P)A0;6_U=Nn?sz<$n}zbY40P@aMT_DJOzKyT6KnHP^0ft1{a>QlySMoC zRU<}*+(kdm!{4v(p;<@@hQ3U~4-4+%-}W1rw=xYa!)v)-({S?T^EmHs0~WowgL@ll zal6eO%u>6Ks`tt<&MX{%k59t`esSnnTY@SI@wm;u3UddZ$4OyPc<;mwT-GBMgCg$Z zr})pf{bUJBvtM9`TQk3pQifxe=HS5f3HU*;8t>1)iQh(C!~^x$u;fqyo>}w&6}&%S z=(0LAPPvXr+cMC?xE;5?ug8@|chUFDMNErs!BV|c{5tm>&KYDZ|29fTp8vbQ{Otr) zxz7((d6%tJp0L_P&i~m%{xH9v+$BR>eo)U!KEqI3Uf?B^d+r!2S30IHpWSUDZ%`D< zON|%E@A!$JI(DxQ{hi;3UZpD0dtYT55TQ(!gZt2fJ*rf{R*j;ED$}!8Wtw?Wm2&Q>Q2H_z zDnwP<`%8sp=&90wU8;1WMwQ0fs?oPEYBUT~DGJr-xLB2BbJfUVs2X|Is8Oj*mCkNa zr56Gg3R(L5W8+X{o9frOeZ$mz|n4a=RuS>Z3*T-)Pe7!CK@K zqDh(4G$>P3ixiYKDEXQinFgy<`Ezyh$x)>~lhtUii#nZ&RHM)d>U8v=GR;s^qtQEf z?(5aaZM-_|>#IfsrfZNwZ*@w&sZP1aG%5Uz23_*gqKo^qXpo*JHC)gjGbc^Ddqb1L zmT1ri(xCtTYS7GM8q{AygW3W$sf}q+iH9a_ysk+du3A(n)uIzmG|7(Ru13!y)62Ce>#-JfxocCll@@ue)1u&!+Ei|-L)#|m z&>jyhYIoJ4GKm&lUaw8M{j^DPSc|L_b!eZDHiZ{!Q+$jzE#YHro~upEziZJ76>Tc~ zq)TPLwP}!%7Nxz`qEGjAD5z44qWFU`j>~lD&0TF$e4<0v9a{ALgEnmtYE#xP9dbIV zL*4b-^t(x$9)8!Mt^i$X4%DU5&va?UQEfVxsY_eKbx5#4mlUSzQoTrzZa>f=3F&dp z=+NBzdbGMthmys*CtrqJ#ul-Bfo#zbpM4e zZO_rA>Tq2Otka`0_PX>WK#yGK=~23`E@c=CXicXc$-d~){DZpmb&7zJ7wM8loF1uf z5|ERt9?>gZN`9*76`~^kbpW&1e6yqpdL;_A|BV8vjY0c zzdY6nXsEFs_2+XAIwzp-CVDh`wtyCB_Y+;=+VKk zLQ+x@(sye;3gZ}$~>>~jg-xtxL3IX{K6wu@h0iAxs^ZYB|ToBSP2O;fWA*8G01ysdzIutJ= zt+PT}*Hc7y{s`!}ikN)6gtWb{kRG}U>8_cO%KZ3wxsc*M=}~QqkXG~-(flg{>a-V; zdXA7jx{0W3pMU}dVj5E|q$dSJnzowbq9CHiIv$&bh~{Pr>8Xl{ENX=``Kyph^hKm> zBcLe(Lb81&poGyPy3r`2SMLPmc3VhRE5x)YLQK|t{L35{k3~Wn_C`prW{Bx{f{-R0 z6O(CQF>RhMB;S)F>Ty#{6--2zz6(i3O-%D&3h7)k$F7)T%IDTK710HOh$6m-Xo#hl 
zGOh|~ua<}uIVL|jUeSw0)Vg0xSscfk zA0VPh3&eCLUzhsc5s}YMG0FCbXo0ne6po8%#1MX7RZO3Hh)Mf~kXB#gI9wJIdWpzt zl!Siw)Tgv9VoHC;vGx~Jgiu1gXY1454l$(~h-t?a32nEOlB!%xM_l!}&LlLE=hthz zggzNc==5U|eQFkwWkX+@@yd(@u_8M9Ohg{N#pG~BMBOtal)q6#g>@qG&X>^8HDVgy z%4=0Erd3g5I{%-9ln-$Xwn`{oCZ-c|5y4zB4dk3!envzFGo`fei=|oQ34g#B_kytkzsityjggpKG*vfrwt1izq->Oa=V<0FM9DXPp0UIHv}SsQ+tT zGtPyG(PFCOadj>i(LB!aiz1HMLm`=4iD;LPh|2UtB+BJ^@UixO|snjI`m0>w3KdX>C;043D>KH)=4EaqE_PJxLK$`!^vS=f~B%c#Zh?3FTt)Um~HiizQUt$#WfOKuT^B zx^rDZ{qBfq-Wxt&hJ;E7OR1&5gq9udL8EHSmc=41X8lTSFWakzx~-x8DRHy+a* zDXn=XAu-ovSDS=9CH?5N(0~qa5YvaHQhMquC4CT6$3`)sv6$+2NvL_Hm}~3vzv6iEar}6^QDP~*%zrR26>O3{8& zD*G!T=X@z?6^co}TtXq|rDS=WA3G_beLQdDV-os4O-6o65|U*}X#E%|-Tuq-UM``5 zE)uG_DyPs4jjX|#=O4GU4`bud%k2QCdn4}^Jo#8cU z;5AxuNJ@XVO6jM8l#Uwe(}#W%TJc3jc}HdR=ZKU>PVP_1{MvQOQWEx%(tVWDmNGibUv~-k`8r;Y`!c%bCZ*G{ z+;5~(vKI0G@->^@%caCyLqdt%KV{=2RKmya(}%~$ z^=8QD_g0aR))E;>Dhz0rTtsLTB#9@ddzwMPexQ zSIP0>9@XL`Bk3y{ZReiW*=|NlUhrIpnv&9b8Kv{wOba;%{L6`R!P`?xhHtqZIG-I? z$Y=uRqjrHlT?vy>$1MqM)Hk5NA}I+}IQR7QNuT3(V1R^d?@Fol9FOz40UdqFdBU}@ zBAN52TuMEb%jh|;ir3TJ#&Y}1EG78U^QD7R6hx^j=DICY^GBV}*+QoVB z&XUJIo7aoKFY=z0?}2#C`P>(>d2J6$DUipi#d$B~?*VUpy4Wryy$%`Ksq52kUYG43 zxo_oheyo&{m}^;;U%P*4n5|1H{=O<3p zr+pmvz3uws=F*qSLZwvGU!PpY%BYCXRew=N_bg;|r(8zSpJgNnl+j#{&uxy&?{JQ5 zgp`JN$*Ag|gf1?VQ5n~kBggqU$;iA_N^{O~UgYyRd-NkE-V584WK{V;MpNtb$um(# z|3or6K3<;+v$(%p=9-wPPm`^bk$hB%#FQfAL653SC z>t4mj;XQXY_mHG{G79VDIdkpJR^eKBA|;ihGMeEirOA_|6v|{&x{%korvbfxC!?hq zVhZHPsyOEd?U2x}3cg0-I(Rdi=Uv9vNUeO`b52T1)41<*?vCf2GUG9A;yk&>IeUil ziJo)481tNX&*{fKX7hU~P2n2YW-p^w&SAf7DLr$R(R|L2GBpEwS|Fum+*_WxOKAq@ z#bMr0G|ovVnfrm^92qURFCn&!pX-)Ug$4I8?l(ENq!hq$_`00;vPTli;_ETz<9vNJ zMoN)}GP=8(YnfyIU^VYwQ#sCg9N%5sf1`NM*5kV6+B(t4fTFBqG-tewMsqKSSgKEt z!#FNId0f2KHBWO`+FOTVJAANfNL_+dwJWj6Z&uKC`!#OmI ze~;nX+Y+o#71yQ2c)g6a^7?m4sn-a7O66QC@Q_gwuc<}?uPgV5rbGHvc9`dWMn>At z3~25jDXkhRqbRPa+$I?vnxRiB+&@{3K53b1A3}%mUh!T=52c0_ 
z&tJn48C~c3+PcZ;=3_qIAANF9;{IO3*8-g1Ki&IM%Wwlaz?&V5zis(7ymmn<{Cc5`*81~24;#?N7cv^?Wkm71Jf0Q2|8mcq%p@~2yyttqAG$;{)7@34s!mn!RMU5DvB;3$ZSwqAa-9)2IYivFq^)vj{2^<5}Wzg-H>u}PIf%rki1 zJLXXFpJ@ljY%+VTO%^%rvWB_An0b^b{o<=M z;*ijEheXYG$fF{53GQi=leesr?;+#3%O+cA+NBeHK?0xsxV=qAkF?3R$5wHav`Y2E zHu?QOm&9;x8^2p6$7riu;atKBQU@tEDgV?i{x>!m^o{GT>yUe|Y%;QgLsI{=$+|G7 zyxn7yz&ga;JG;C(XqU{Mc6nUDF8B4ED`SzY#@wRTAT(oQ+s*dvDd4w?4BCW-5uV!6*QfIXa&u)`sD z;vCYnmR(x^>Xc7)95OD^DHWbOWPAgMWS(-$gCTYqKhP~Jv)r<4h)cefbIF-VmsEJ; zlr2}>l2zR;>ZMCMgt^5y!6QebJ+f?sM;!BGq_y26?hLOCjQ7a2{vPS&^vJ2T9`TKF z$-W~oGU|dyT7|mhpYoh{T&oyS&%Khe*DrBZyz=I|SH?YvkpQcgyxS*}#(AY#$r#C*8Y2}r{@I#d zsryHa-232_zLS0O-zh)w>yximpG+(eBWq{4CE-nsMA!658pk-h$}5+rdgcBbUfbIz zzv+E4d#6uQ>vMibVcSB_-GNauTg`CiH=QMG(hw-@E}BS!WukCD@Ld~)!iN3wGH z$GlRKKg%Yc^r+~SkITJcclagD>qKKZ-6pZ<+wFZapzo-tCc1^-T+?+Nlr{q;VH?Z{^yWo_o1 zPxj~ci<|3NS=1-%hIysXKVE4Q%=K0BNza*HX?of(Ena%1$4Z~%;`7|=yyP1`IkVp@ z5A*n>c1y2px$BYs9RC}in`rRK;Q~G>&w02%d*%BqpS(1A<=P)l?TU)uN!gl zn_oVcqYlS<<#D)I`YiNI)ON1~kDX+qJW2I=_SlP7SFV~)WWrFym9G}^hatrO@m0!2{ zWm1}7Hm~7aiHROxIgdkJ>pQ6*+E?MQ=eSv z>X(S3u~OnMp9Ez1M4RZ7#`Ar$E5R=olV7H_@=L$HUioE;UwUlz$;0hFsj%HI)#829 z>XuiIW&0$ekWZ@Y@yhXnetAH-n$E_`4tK0{`}!+taM&Vyu2oM8u+C77QZCCr(DbVrRGMztfWl4k0u^_ z`^3`GC!Z+0C)Cj?%Anj-zw9G69R65Y#p}B?jg`82{8FH~PY#^$%Z5Hac~s3O#}E2s z7JWd;Mn3t)?w8_R^V3T)5`L62Cnkq~@=47Izuao#mjnEJH z%`aD||DS>MJ+#Mv^K*{G%DLHo8N4`7{-Ulz zZu;awJ)a!8?Uh>mKJlAhbS-_d^CbP}8ov}9<(2;$Qx?R5J%?8=<&KqM#Ki&n+?Vuo zh3IFpx6+pC`eh=o89mD<*@48%Y@cLIp#OMIn_fz+#}czoh|?B6$-SR;aF_V)8%san zmvEm?f@1tq`-@k;(xx{}^Gau{PYTl4B>m=-u%C3$Ix@p7Dc7~+%CHT}|PJN&ihT1Sh-u9w%)`iDkVzU{>e)d)$xlDGWx7vFP|t_9 zQO1S+l77xBYXW2C)LqKw5V1VLFZDPlPj}*_wNE zcgDv`#u3Jr?mn?~@k`4Qei;?um&de0kDk70EoHo#_j0a1E_mgA3CgB@j7-=ZBmMbv zFY53;@enr8C+%zd<=57J3F_ySO}*mf-cI`UtHkgLpB$RTShUtF+cUiq+TSmCJIBgI z`iB#xd=mLT+I&O5oW1Ci8=Pw+elM`!D;McYQ|vzJLY=fC#ojBIT$@GoH=#JmL zQtoDq9PHwipB(?kYp=Ye{}{E+Cx5;5%C5eQUBqPKcE;Auw9hY;&mCUJm^hm@@rn9G zZ@-M|z_nNPNnYx*vX6eq?U#+a=>IuJn@GRh=|%l7@JSucV~5Tw?{<6TBylz&oAM-1 
zo-v;M_}wQLN6`l3xZe9-Ss&<=+Z-#q8D+tfdwMtZNyS!NTW-IsC3f;vjFpRAtI3MaJ_|T*s4`|BFraGd=Q4zZkhqIgjoXBZ~%j1qIZyOSjp<&QTZFMg#Y#EaY6SICoNF8S%q#6_U#A}VN7h)Wu))V#O1$)cLEERET7}b3&G(a=F`hffA8Pre32pq= z>R7ow*Dp5)dSzHO#yG}_c*cff^j-b(_+%jMpmTelJYPs(McjvX^GV?>;G$?vt_OiFd}K3@3GUjN?7<%PPizQ;F1h z8;(bNc~RRZg>QRh9oH6jg1pT}zqB|;=5UXJ$6xkF%u_Ql-mpHXf1u^_2d}2 zM!o$p+#?ah)3H4Ck;L^A>efxYpZ+gKs@C_*F#497#PZfdadLc;SE7E4k%75lK=JUP^{F*M_(99-SM7$xnrbTZN{qx-+)Ym+>9Lh_4taHoe`A*5Eu8dn;@_dL>cK%JS-N`9U zH#%kBIj77y=G94 zBsX@-!@LeT^4%#{%e%z=mrL?6k21DmZZXeEJ7#WF-y!FeOKe4)GP|)$+LmyNjCM-Y z54&tR=aPY?T~fS~TPA(;NXuqUN&M3x}uNPH9usEkEu!r3En)JKZgF;#{&bH}@5GIK|h| zCP`~KXD9REJ{~E#)FB04GJihdl8m{mhh#g&Wpv5iy>3b8dbTkap8muxIgUC-@t(G{ zn?Lrsv40rfllHcl-QH@+OzPVE> zj&jIz+T(xB!#`efNx=mkSw7e$bI!YDM+ukozu=O?Tln``4hj0*BY$wb>~$^~(%UVs zjx$GJM%$ymIM~1=CkA=sPGOhmesjybv2NLEh>`0h9b#wQV;ir3x!WUeZ@6XqBD)kP zMt(c%k%yB#Vp_vzKXFUv&73P`xoNdWE)zQy|8>h>V>}YuotR2<%ZFu7S=PiYeU3Qg zKk|mzW9Zj+JEiVzmsDHql!58A6^BO-AEM8$KzU5}$iU;|(2LxXNI(37_Euo0TU@EE z19Xj%A{ibzSII54hInM?O}7+b?c`Wtw;W(iq2+gvY-{O}_r&>T`ot0B3bW|@v)jc> z!I|+AGdNyM8F4bZV4Qqg7AIxb#z=arc=?<=URnjmNdtcM{1zvxU&l#clQ;>T8Y=>VB_b}deR4JIe(8z+07#Y&kD@v@)S-}^UCe)%O<(u>4P z4{w~b-xDXrZ^TN}@;G^VIZlq&ikFGYVAW<-2-poJ^&@BDTayuDEy^XecPd zBu*ZFkCT&?;^otU7}0%*le-@a$;X!Q(l0zt_MeKA5ohBhl6L-3V)-zTb-j0)9C5Q(tR*bFVJ4(gLlaP2h!2JFFW^(ZcemQE1Weq=83X?bO`!`nN$r*iIf6gj#;&~n`&C0~e zZw39bKPpaYkaO;+%Uo|}tbETED@&PU{23A}lUa`#eAh2|@-o+|<(Kv)$can%rOhF7 z7V@E=%)6^xB|rE3jMk=q-+zP>?JQP&igJ!`sEF|)q(0<6UQAf zl{Kg}Ub&m<6W#h4DM}l4pC;E~PP>HsGHDd~>lLq*tw#Rdl02BXO#UvcWohJJ1!H9! 
zxqi7TRRU8!^t)Mtl6;M^Jf*WoM&yWQz2eY9^8jH(wA;AQnRQ}T6Xuz$^ML8 zZDP3Bz}zg+E7w>@IZG~uIbP{HDn_1hK06D=NFL_3rvhGvnG|{+{8L9J4&)-$oyFE=De_;`7tJ(yy>r;Ej>>lVhap zjTpH}zUmq0ks2RZYh)h!Tb5TQlUM$8GDiNJ%eoHda)GtHLcjXtnu#^5iC$^4pEde1 zPI=YaCSy-HlbxP}l4ynY?A}svkkZv;^QY;^9x%8Vqc;8IM zy(O%fFIZ=j2?=(&eA_0~->^R3%_=VL@g0b^%aA1QKkzdK+rzncm1&ia4XvU-Xq94p zZStCR>_OaT&CmV!QLL4J_?vsH4zqlz&d;H6&+O!LvlKU*rA-^NEa%?oeeUTES`sZG z+`sMIz%1*(N6Chx(XyAHH~L??QOduGmWurBR?jL1`R9of8|&E+zrzmI(1G@)op|!t zh85yO?ZXbNy3TjIkGRmQl@tHVc4O2ZF1|nOM#ULcD!-~!C%y`(rim{WdxR%p~A}Rd2uoW%$Sg`$^8I`YE zaB8X<>u*^RywifBzgrPA(2UTv78J-~#+oH&)azqGk33dnPcUQYFbixJ8(edpsPfH* z^Y^US{lJcmZS6=uX+^L1He?pEAz`5dADTPR^^F~?TiMZMm;=ieInbz*6P~>e#5Z)} z#$OKnsLX3#J8(@o@$;$`|CP66;7L1*y|&`=1UqW3bRy!I71efI(f*?a1KygE#rJ|c z{bE6+(TX{BEC}mo!SH{~C>mwPt=eYX*YbG3-bVJ5o6>b443eZU@dLSz+V5 z_-m|oRBC9${g!;k{hSS@YS_9y`cAT@K^a(4Tx3J>x#a0w3OF4D1;l^zn zd_QdH8AkcCPr-&FcD(vwL9Rkh_-9)&v5g&1o?9`!rWGHD*$@Mw+` z-gpO26|&$d*QNUN=btvdJL|;5fmZA*;lz)B9GJ#6Hk@fg^a}@UH5^FY?ZnQ%xDJ;S z+uAtMl+Qf0(2f>vCu$t9BlD6S?>%-5*kgxbiUaF&^4<1QPOPip#K}r_Ox)$bkeuuR zJ;#CEZ)~vVv0>yFJ2KijFrb(VmJB<5k6d`{cfpnFgzD|Y#qJKwyX(Nc)pi8e;(C@k zQMQ%?!T;IOlk(R*ZuC6l#L9FVy1cNV*C7YgY8%>c&7%i85H!q%`JoP+FmX)UNAo6p zF8c~xKj%PNBPZ&Nb0CiIil5X3l=0x+q9mx06fxp+%?jEs!!9fR3pK>9#yb}!_4m_mo=4x$+lg|NVc(R`p zM;ka$w<~qUxlH-Ych;BL@v)l|?+Ut5jlHrf4!5IjybEo^9cUZpV!s9I;g%h3#<<|g zav`Fa9ko9@q3h$s=d-l)D%8ytC$xihoKJTnpsE7}D9a%Je`0_eZ!S6UzJL?42W)uT z--&{p$H-{TZzy{!ggcRIg#))2yD-?{LiblL9O9Z96TiL3QU{dFre;p`r>>v9aU#cI z>Zqjy18Ij9^LpSc>q1xR=p1F2YaQoW$BC=FcRAPe zdQ%t13~=zB9XB5T;zkLF6ZQXLpYZoiY@klwm|V!Be2O>qKu6ukG8c9=CGKjvF@U`t zGKRacxuqMQd)T38p9E7bCpL#tf7=~6+QEg|87_=$=|b`$7f$wY!MDf-UA7bXs&hS= zPRzLH#P5H(P&d(qUx=M0m7Q4A)PcK{)5c3~>^bU0qYNi}l*c*tOc*hWw!*&U53f3~ zB;Ag%F-};h&wGRINN7)45qD#5+W9^hv9-#M|G0)DeQeMt*f8X@lRY6Qd&=zl5c(VX z_rCvg;Qmtwx|MgqS=xbOTdWxNn-i-mIMC;n9b0&RiOvp$)vzLsYii%sj;dDnBj{&E zCHlqk0d_1qY)3sEb^pbQ9UB~Isd@0*T`vaDb)x#eR_u94oPMB>@p`b|?nEd0<7L!i z7VU0DPba#Z_2LKP%-ySYm@ipz;l2ayXuD@Rux|o;zp3w~kI!8%m6gqz|GT 
zdoou2H_?NYdj3wIzKVLUPQQ74v=dLGZTPgxiIVjHUA;Ce=RCuL?D##6*e_~Dx8*jR ziL~Q3vDNgB6@BQh+V5lE5RSiYoC)WaS#V~T8HKNy5FHhbk9ni{PQDE@XgiZPSrERN zeH|E!&KI_!b$9+fzYYD5Tj8bd$~3W|`z{OYE$wL7$$>1Z1FXGZ)Giw?=W-!9l5418 z!>k_hc$e3XM-`lCO)S}_+mRLSM$!N;y6asSu+fH^=iR8Yy&w*Baw4>a6UQ&O5YG7d z{*wa-+u2d2za4uPFs{*V@6X{n88>@gw4;IDiblolC`i9JrkD*KcV*pPk5hJl&`-j_Cfpl>YN(*&!L_EOS@=T9xzT7Xz3=0*@lwg*;p zO=bTYj%#0FLvi|;b9cx$=tHOHccLb_!e4h>=>O8n-UBu)3}uYASgqILu);clftqs5Jw4wxclzzpA z&dFBPXyL%kF*e-JZAXn{JKA@)-eccJ6dwFnvB< zrP)xRgazlH*ie~kQ}gXuvCodY)LFy9^wIQbHTKX}&sY)H(TW@Fm9mx?n=#pno}FFT zS%xu|^Ec(NVF`QAoFeDjIMxAUgbf|3^E1g71YfaWF>#ptPb*HgwPN{T8|HZ!-=|w4 zUOU>+zRvOInqos}Nh=OMw?X1;c<8kt?Pp=Ur;N_XgRA_I(yf$XcxM+pHg$?ic zd}pi~H#z>#0LJ>YHhhm@{LXE~Tdx&+-FCjK=f+CLbg2nl)P^&`ED_T73JA)r@;bp4(g^LpOKi$gLTBg*5ApUiclW(ae8v>=bMR*r*`Ou z(wEnAqxb~JxSQLI zj@HtQ+ctQ<7RNKjskQ8*)bffIYns_`;*$kE|Fhvug1e+Apnqn;J#o0i1V3U=K8 zyBO*`WbdXa7IbQDMy>M}Y`A1W<1jORO*i4gPc!PDH6ySN?Ki=R>sAYP^185W8>Xk3 zkkrtGcCSpRaovP>L(It0i=6ob{aTO%d&-d)AGcx4BhJ5+1vTl%3fwiK(kKhYeX(LZ zG4$v*{XTVbw3-FKW}DG+S~R-OGvh}=;%BlM6WB9q7IT~)<)Z)3pYAkvqxM^|?wtuORvY^ixN$^INXeG>x?+s)`W6BX?NQUu$407 z_eIed(a?Z#i%sZL&4k4HMqG)H#++qZ}eFAM>N{$%qThEj2a&esJYRIe0fcnKHGpTQ5JM-$-a7rjOg*!fXU0E(fhY3 z%&cofK!6byO6zgFLNr#fzt-qTJ;qhhqv-i)RPY$E>?eD&{h>!k_G{}9qQ_{i>1Ew$ z9JLuSlFwOK(umP3qH!hDh@p*)cph(rKh%i2GY$9@YeeyLdaU9Y8+JvZM0^xH2MyT8 z{&-Id8c?)h6i!hNgTf5B+(n1F*CJpy8E|fUB(~g-!ogGnYTS-QSgk1h(}!o)l!`>D zbR%Z_jX1VSk0Ne8{_1FehkfwA|7yaJ3?sVMGhxpt1Aceu;m$I$=T;>0!-$)9J^rIy zid{BhN=`kp%jxlhz0+rp}9uL{`QMWP*Z78eb({=cLI5A22@5+uuOZtY4 zB}Q!h5QW;AQTV)``qUzk@yL8#oZ$D>z8nZaGd5%qA^gX zM~`Sd7PpE*SsY8!<;Rvd&!_fkI{COx6$0kR@+%yy= z$3@_msd|*Ss>7*)dKeG^eOMH_t&c*NuTj`cpWQ4y99~@{djUjXQ+Nbw-;O}exe>TJ zIuf5bzmxsLag_e%e`_O=KwR8;8;SEhBGG!69ywpoo_*n%^)UiP2l0QWjrhBk0c*Ja zS&t%+Ou5z_qC@jWI<%k5IqcG*RKF-}8LmUWn~~_F>2P{)B=rA=qj;MLyiV33p|}p! 
zD(I2J5rrvN!m&Cw3U3=xXNBp@e38^S=f$}d9zp*#GK#&qqR8WQ>>FXn@=p%i-66f!+{z)EV~ndKNBM`eO45P@|g$P>S!zBc+7rwWe!AO zMo}GhIU+E4lpdpQMj(Bwjx`oNEUlvPcBc{J&Kh72)?vrbC{%WEzFQ;Esk|PEf9tSt znGVy8k@!29z3awP_Iq?N3^bti9IoMTBwn|P#O`%rI8K>ep)R{!567}N9q#UAFUXBi zxY;-owmp$3wor%cph$f88PF_;0V(<@_AfJ__+uUZi(?~@g0&U5^H$P+7(ea;f%y} zdlZ)RwPVKL5g0H$68DY7;toB^RiV6kQ9fDpYx5$JO53hCR*zon$@;%~(RedH9Chm% z5cR(>%zqn(=fq#daRyZCVZ@9F!tB-R!t zw%$j=+9DE>>>Ik4e~(?QL)sah7uF^UU6)0|{yY?E9U^ei5sK>rBT$uox$hz!eytaU z0QS|Kdm$2a3+wQ(ZWJ;m>Ts-cIKCIuu|HV^68Gv*u6Q_ZjSt1UK6-4Y-q)8j0Rd4s zR!s*Bd!hz6jY8R*VYvQ20*13;=+78%&|tusDG_)|Uzzxjy-w*X_Uz<6@m&A2XdJJ` z*!|6jtWwcveO8Ax6-+q&!H8bP4fvWX8b`XZ&*}v;F6GrD*KQ+w&oUOBkHoigQCPb= z3Ok2IqIJUg|MqJY&Br8f&-f5on-1 z3mP$RnjW8aMx)Pl9gaWM;~3-hxfmnT;w_k3z=D(H#-kYbJB()zUYq#_^Q{}?khOf# z*j|V^qsM|11B=FG2GQ=hIP(fCFl)9N1^V#tdy%7}7@&8Rlh zj6&6;@k?tX$~4v^l0GVsw(5>X0|k=LA(jN z(&Ufrt!R zWPdn2dL4Jbztna8_amKI~pDCnsFs58vYpmtwl5*I4u}VK6i}qG@N{9_jj)Q zhJoDB%w9zfbf9hqldl5F<>% z%;iQ#qqChnkDO=!Q1Yd9Cj6PphC7!HXvzBOvk1ngfhLsgU;!GkFDZFLgDuhceAI#; z&5g*x`t-%lCNwA(jT1Y`3yYXAZA29AbTcAw7j2p`xMU>z!JdplTXN3zQzCJJTq$d} z1rlOG!YKp$8JW=OHMt7w;EYs6q{ixc~phb{b<`y*-O+O)~Dw8vhR zDWeGT@0*k*bMm+aW=tJqgmT$Lt|HmBZq=h*Vea})abD|R# z+@}v|^qINENh3D*F(ZLos&aHR>NunEqXK1~-+*}!7!#7Ym%;pNG3yug=tt}P&kn6a z6b6;iV?|aNIx-&!$uvNbUv_E0oQN@Oaarr z6LSs&2F!`X68bpH3YPmzwPt!k=T>T-o8H#h|Nox4+uqN z#_?gf^$4TA{6Sd+o?~v(+lc+2LUHm6bA?w?co@UHqZo6T!I22<8pht%j9=uu4aboa zNCeLBjl#z6I<)Di#~@!6dftt~^o!*A%o(0HkH($s2z-vzVL($e#y*cgv%lC2+7pE) zHS%Lbfe5VQ`J&ynN1^}W2%LQtiXwZ%;Tjf!d#A$ip}G-`zJ}md-zdx?FZ;Nje8w7q zwe)2VGQ*MNG@#>?5OggZgpUm(FzL?-*5WDm$K;LV5`9}m;J`t$KAj6=(i|> z{i8$hH+!Q$ZEVDV(qYh;H=chL0v&tJk0sywo@~Ih%rLBa7LH3tqj05Y7z$1 zjAbwVrs1f}>pO1`nTI9`-20eoi zc0V_os30t>ABr85gZ^*cQFTKxp+gv&BLs;#LvXoPK9m`pAImAn=I?`Wkes(wr6_D) zAI5%ddi1>=jxB3KVSE#eGEsr(IXwi%M*%21A}^}O=EnT%%=g&G{>7URR2&LC{gJ-(Ke9jMy{$ zL^x{C3qgG4U~IdU7c;ol+h=J9IcXOgB5>wdINq|CeaZeIC|oKCZ+nH|x0WIBZqAEJ zMS~HYogaI{gRrt`0A9BY#*xCIm|HUdrfLCLw=NI9)(%F~Oxj%Q{MfoU2qi{_p*qh} 
zxX&|y7qksUOXd};h~ElDLecMXD5n1sj4d_uQjYB9u8Y9Zfq}@gJ(%mvhep;A%>0%Y zf0T&8s;YTWtP9WkEtCUAz6RpQiQHHalmnMWe9|6l3c@y??NH`+UL2kIMVs0r7xLEr zsU3{^sYN`;3)i2`>uV`TAp~J-JkSIv%Y?>r38M~ zhQ@u-PPhD~9N-?jr z%bBmWyMMmaY9INaJ$F6UZoYf2z2|u&73#j#_I7xob)58A``G7|rb~LGWq*9G9jWs{ z3;KCOYfIP5EAGWlH|gn*aQyz4m?6A{us#fSHsrt8 zntk3Aty_iHTFB&gTIIwBL7r(oWdEY1-sBTH^WV+Lu1>G*7E+ z?d|->+M>g6v@tv*Syjq`(>LB}6|dyR#~dHDujRjJ!;ZeyN-q7R&0hRQ>&@%V4En4k z#C+89>Aq-|$sE7ND=m0uw)Wx4M=fy1CvD=(PukH7xiRa^5A7f7vV7%d+P&eAwZ8FR zwY8@|X`R-6(zfy3k!Ni`Y7PJQT3hA&tj(*Ftu-{~#J&2tFfQVUHepX5^h(HqdvjlE zVbeZogPPveu2p@c$>8^zW%m!QsP~z6sp~uK-*I2Hd*1ijn71#rwBJ5!m(D%aey)D6 zwR-qjt2gtrR?Yfa>y-9Ji(K(qJG=9>2A)&BZ^3J=O3yFa-rBDwz|9WVTjHo@fo-U$plpUuv@Ek(RvryEg07J8i;?8s=X+3Piy$_8PCbh*5=NArF|TKPdoPd zm6o&Z8*OpNS6bAR>sse0*;>%kr&=zKnNjYUmNxXMc8xmt?0%{hrA_qPcwhS~#~W>Q zAKF8g=UT00ue2o-ZfQGT+|%|Ixuq2#*1wm#qiqhnuI+a`(3Vcl)`~p8r}b`kM;qVo zv3C4Nw)V^W`-?f7Ku4?(cx3t3dUTX=oyK=Pwr{rzNZa}d!W_pbVobY^qtlz>5lfS#C`2-<2PEHr4O}vV_#_l zRzK3h{w9VJUTZJkJl2k!d7$_&IUEf~u4UEBMW@vKH99AUXJw^Dw5 zTN;4Q8;I)P2GJM^db(W1LJX{NG!hP;Q1A+d49)tI}R`L@|>Y~Bo*Zz zT2n7(H!z_L>!#!RS%oK849I`YkK%869?n8HW1AgInX^tk;KE++y$={*#r>mpT-p_b z{`~BpV=MPRtMjuOe@7uW#(=lW`MCqX7qZ=gpLe`B zNY?7caBtxTYwfe^7%;S+36p~JV3*RP(RTy-z0hIiu`n!S?KRi6Q0!?GfUD0!vFalE zVA}vBC6X^(%YhAB@}tr_)|U6_@wXO+(ZzJwelZkNYOtd*-ScTXXRo5)%$F$7;Yji84oG z3hR_>$Aw_&m@veR3`VZ9!KiU4CpP%rH6oXm?ukwC89t$7g24vIiVlR#YV8-VzdK^SyBH#XJ%rj_BjT{|Y` z#npvBv`*hSw+1<}@kS6D-VVm1+&{E45jjydBnYv&b3+D_qs-uvbg7jS3r+=M;J_TX{Uv~Pv`^aN zRqwSkw6QOfK58@T<-zLrd68U|`Ow^8Tu8}>an9W6(;*iMKhA*x`vY;Z+)piRNeGd#9Xv zu<%W8w7roF`8HDq9|B;=%8BOlb0gttAj-xCpje-LaD53x=79Y0w#vu)eNHSZ6vkfo z!MHO!Kc4i>kCV;vpnJ1GoX-eE`rJT#p+0(b$cqmPL-77p0QxW|Ih!L4&WtcTws7BI zULb1JXAaN2=fk7`7zZ$q95qX2j!jaQ7jsluelM{tNp1flL$yDgq=M|pDj;{FGL=eH zOL8Ww+aa^nVgD@EvdJ9PEGU84J~ag%_xLN9U^bRp+VnYV$d6mO7c9p`KJuP&1Du zs!|hDRC-vFy3saS%^IAddLK_x@t0Fo(T#J|qDKj8=)eRuxoL{pbw5%4ES{k1G)_=$ z^CYM?9}?7vesh#-SfW}xIYsr$n5~LEO;Z(rO;u&~r>Z*3lGT=~393(@MD=lAs(RXo 
zYvTz4gLh`CfT0;G;Z~Yz-ZNcwv}dW>nVIUw#d%5%%T&vcrmNYd(^SFBbJeGYiE6{* z6!rTbNviOf6jkrwY<2QNiW(c1tjqeMnRXVpA0Aq^fE|l9jVyl6qV|MNO-hs7mgeqy8j5_VGEovU60+ zVM*%r;bb*?a*`@@XpYh^NmOa=l2sXBqIxnXS)~yxM_$cQpWh`YLz!e%>fbE2w|kmO z?3tvDPf}IN`$QE*ed$t?RiZmtEzXsq+H-Cf&nBw8R})mHqDgAX@D$bRQi|H@=Q=i~ zs>Zr>wPj_py4N{H6)KvhMz2g&xh|%vExl9KzV_*A-0~Flv0IYzIMdZDPqK=vn533Q zq^pU2)6~ah8S3?N-ajc>^{8~l>9$E~+rP;wUQ1F}45_MZ)kM|wRJ!Uz zEPPp*s)k%oQ-`>&){ac&`!z|KRf?J!oTaALPFMGrrmAto!V!0hDp4*?W%2%3`_t5t zf$8e#m^2kXH$?@OPEwinRMo#&no1|m>Kl?({`qNYO+bRONs5ZhNL8EkX)1N=Y&GUd zqI&x*RaF_8qAt%*QFq=Zt2)cmRG)f$Zq5ucY4TRqtGydR#wUwZ538&Oc33?IxwFCbu%x z?%tUy@6J?p=VONIRxeo%y){=2%}!TMtEQ?1TZ%G;WT?W|Q`JV5rfQr`S5+#et6jM= zRsTDg>hB_nDt>Re`YkS9g&ybn94V@AsWjDgWx7h_*PI1uD!CW0ZbKQ;7R29yEuayx(?;cDOuKb?B9*R{foz ziaktI-#LeRpXpP+rm9EIG-a5RrcAH1)Q)MX>gug@_2F>3D*Io$%5yDM9j~6C_UuSg zC3>f;mqk-mk&&rt8rL)8Mw%MmI8EjMnx^`#PE|AS&r=mTCaC-~(^XM_s=5}Lp?WP% zQOh@Fs=mdN)sDu=s`uF}wQmt^*_Nq(@1CN9{+FQ+#u68olhv>x>B^liRh6ujrjE@{ zSHBKUQ=z9)Rj;zC>PQjVRkLJuls@wIKdH);KTUnnGSt#3>8egH+V@`>>fFf;RqR8$ z+Eh19&76{^>V8R6Ly6N37w4+4_B552ovJdAXR7ou8EVe+`RZufEM-5Op?<{AQ?rQk zsOO^PNs!E5Y zDc^-GwfXZZ)nP`O`e*7&bv!gp?Px+wgw9oM+hnO>Nl9u(*SV@TZ7{ns=Qlq=jUSq- z&d!~q3hR?p&-+R0;gE%D|Fb!2lXCG|cd zV6HmYVxIbYZ<>0#C_~Ncm#XT<)2HsvRNXIdJ|!~L`SBTQ>;KYJNK}@Z#d-dAFhf0c zq^ZSA=Bey)S?Z!ALpjc6s7ukbC8Vgi>*lIlx->N-U8!kQj{0lc}E4HY!p#wVP(D3C&Z~U0|Bs{Vj>qzS+a!{< z7L^dCLZyj15AE@7Pn*oFjFi1MX_t|`XC!+^Dne93h_Xim4K(z--uI6_`aJjjT-P~{ zb6)pz-pqfr+uf&=@*(7E7fs>YLMZD(7CGjQ zgcAZNzAun?BJWb|rf@3t2`6WR2wJ^8luC_5=$LUZ3D3SqB@8Fmp9E8>b`Z_j9zy>% zhS68~V9MPQOpi}6+M@%Qj|!w$cSFg?DxAtq?^D`DAo(&MxO^AOKhA`b&g~#dV=+af z;~sgousnldQ8e@pB_0i-!KOgkaFF3xDwMu$45ufyAv7izOmXY)lQzSbJRifkOCXu- z3#KiMw{Vsdv<}>(Id6k#hf4@~3p4-6__Tc+N@sk6$esBUNA_KBmQ%Rg4yK#~!BmkQ zL}&H{kl2Y(%KpxDrxQ*$YXhmUHH6+-gp=k#FzrlcHnxKK7#7biZwn%_4yWvCk#y!l zG?m{7rO&e?=m*OmB&-5Rqbr!$G%3=1#rALtq6aKatbGzjSt8+ddP_JB{0^s(+DJP3 zllc*wP&yuRpWckzBfkw1bWJmy^xeZrnB{5w5>d3Ij$z?lBpob|qJKV-bhtQzbaFx{ 
zGv+>x&x)kQFXHIfzi4{+F@zS*il)HN5#(eTO)EO0$loK34lN2H;SIsWw=t4ZTqDSF z(|tN-7eYHXhLY06eNtk6(9SHBVhos{Qw}8uQ3%{e$-9OTZ$u;w$AnOeY!uy@8cE5uQDh<+PWKN)P^Enw zz4eQruB-@pQyoFQ%pae9e4p&3;^^E^6kVYE^sGFbrre04G76*fx)F3TAe8iGFd452 zp^^W~Cn|#J@WKd+&IqDa#ldv>ayS`FhtY^e1aa*RqB16v_Teyc9SxxccY?@rS|E)_ zFdrllL?UcIBe^h&>xrOD=Ll-p6-k`y!syJ85L%-cK?~P~(BrA0H1B9I6(utN&+J5? zGmKWe2_suDT$taX^*j-@eQOwbE3!PgG?H>&N7JHt;k0U19BtQ+rrep4G^80q9c-N$ z-y=yqm|<&2H1Re^Gu%bewFzeHSMSs5Tha6hLHP{hR~JOk@rYPDToFOD1Y$|;V>s0{ zgiw)XBn3D|(k;bUYHo}neUU^O%1j{X%`wz%7DMt2!b#;%ICU*xK6@gZ3YSHYB%|Mp zKbHF6MAFIHL{fSkN%eJ+G;lDQI9r*mKMSL#sz};2Hn+iwCmc#;OQVVF z8q;NeJP8R#Q6Zy8ktdp@`QvEIwFv6CnnZ@L<49E|iOMBo=+C_*%BV}G_~X$u$?os; zj;2>Gv9$LkqtPId;x5F|?F%t1Hm9;zEX2`wW_#-M<0x1lk+wW!_uh`CD1kw zO}hnCsZ}DCR1_Z3&D=-Cdn%P)y~w19ooVzlGJ`th9+8x12BkWt)0kKaeb}5zwL6n2 z^hXNSGCi6XrP03sQmC)>0SUcLWpVQXZC{*5iTRI6CNz`gaOTr^NCsKyr_-bL3Dmna zo&NoPNC}IxD2DM+-jqY@gL8@Ja5|+MWm8Lb2IW|#k#Yk2+&Ya~Hl)(sfgHNT@{R_b zB>L=~PTsdO$XY6mu2{xU?;x|Qk_3ukIu5COK;!NYsctfZxMLquLtZj9bth5F`cx8_ znnY_8((>Pjr-o=>J-=hMm0Jed}##Zo|9GX2?^POHBp zlS1bMI=JQ`*}O=h-JQu4I37j)eN5L&Qb{pCfwrGcrky1zv`Zm@IL9B*-fs_Sv))4z zS(8Hj>0y-0lR|D*@g(~=n*N2vQ(;{!?c2eybTp9;N2XA{aiq=c+id#- zvN(`T2P+cEVOlKBl1ip6S&78=DTcgx<0<9F1JZVkr9I{uliW%PbTrRsU#?uL3SIG>HgO=GTxL*B^%P{JV!EBn5EL^)EsJR&!FL;2Xu8- z7UdYH(GICxy2$*?!Lc;rzx9x=Tc#7&u3U<^lSO(nGwJr~Y)W38MQe}eQ{J{b($LSQ z^uZ@|ue^Y2pB9kTw_LittdRIJ^61*yM|2(8RK4sGefykEiyQK&fcFvY+nYsd`uSvf zpoB!E3n+C-2HoS$pvG@m#PK4Jyef03%qo+{A`2eut4Z0k>q#CR8P6aNDxfBw zJo3%Sqpv?RDfUJtJ^5EoO{a2c_pd^lC@!S?;`!9VQ9y(IIb>2;Kp88GDcj)@@qfu8 z8Q)ymb1jo>cV$!H!G|PK_K=oMl+uCWQqrxGwJ0Z~TP9mlVeypd?LS7Llua2H7hW(PfPa zT6!Rl$`6+jpGO%jimV{X)8%ykObKzwGHW;8hl(tFPSVBca~A) zgCg2>q?p9p^6BZk658#SPlsn`(tzh93T3)qz5Njx&ncu6Ir+41Qz^~+S4uM*Gs#-< zF}2>zr)$pHWX_s^nAJxqm38d0tQG z5YvtI^c>o{teAp#6wp`uV)DIFN~h-L(oyp~icQXXdAHeYt?_JBnzXO%W|+JTB-cCW+om+QIyw>6#*X`k;Uw zqm24gawzu?!!k!cec&yosWQ*W=w3NFRF=`+tWq)`Dxj+#B_!@&K`Dt@gjt33_DwOJ z^2(?Bh4~aWlt*&Ut0=9$m?~az0Z;)h#)6<6jm%tSusw_&oZ&=?PsiD5jds 
zLb7Per#U($q^-tuR9-=!PLz;pcPTkME2Xa|Yp5f=oVtIPP)A!OnLc|$ITKGvBEN=C zO;(XgmDbM&eyc3CcQ3_c})vuBiG_nbsmR#LIuOG=ciCch6a>1X0oI%)Kn z#y8c{EvI5iyj)5_H=j_jZ8g0ct0IT1Wz-Y+oP0zo>80^gT3ppaU&fx2lv6#;GI~Yw ziS<+~|C;6pJZC+!2Ksugn!F`yNF%zDJ}OjG(t#%WD%(tPH(SW|NE_8hyde2`^_08w zEhSYq(;coiq*+%-9LHXfcSt>5t$t0rhhLKVuMXn3ZY7H!Z%N_q8}gN{r^}lIuSSwwR88Ag)RIV31?{@>oKp6@qViYObZb)`)t#v$ zUY9CT|L~UDlA0*+X9I2Qc}KHFT3IdGNEaJx>CSKkeH4B}MfUG0s=9#|t$ItR-QUye zzil*oYCpxa4N~D)3-$eZOT28~)JJu+cKj82JbXzTo!V&1+-iF5`jGC@U8TDZTR6h9;lBA=@wQ zWL{WF^HwvAs8-S}+1J#k`if@Ut)X=VwS>Y(ntQ2%EU1CmZUeLBcFOwIKoaxYNHnsV z7TLElzgkBv#V@E(>OF}LG*VS|FRgHCBB6}~6nd+P!t~#hqEZ``K5C|gysv1=(sy)c zcQ32ydTCiqBiV26rlMl8HIZ)4J6dVmOhwPz=^y%7E%1tdzw4sC zaZR+B>5_xHg?yHLB;Vt0RL0d#aZVp7f$8COc^8c;y{83o9n>J%Pkfx8NXEUJ?s$#T z@UtNj_Z*?bwD+Wx-bo(%-|4PnCrJmtr?(v)bUD9=-ZHwHgE}cfuZ7a*y`z+_1~OmX zO~G4xC}qY6N{#(Wx6i*Pn;V@}d!n7HbKB_j?haaD{EphqzEJO@L7I&HNM8?iQ{L(h zidoZ2)3&^%w^JJFgnKJpz12v;e1l{V*GH>f4bd9QUb;RyND5pG_m@6W#l1dCZ|^7G z>JQ|$qmB9Ve!7#|Lu;-N(Y%mO%KOzyr*5@TLjMPPaqk296fu2mZ6!C)7FrtGM1=>M zDOtCT{?q(I^3_9B8QRJ8I6~iKyUE(Whno1>Ni@8Z{)ILYcV-{O+w@c4%zldO8KIu& zN!Ba-Lwn1=)4-=83iJ3#Hy<;7k9{WR^Pj0?!2r!!*hAthl%syC>@jjLDvNOX}oEWR2O$p!S`;GsUM?GhffqA-$`Y$-$`L; zg2ZW*F35~f=D|<2!}$;SuX;x_=MB=*j!#t4*G%&xK9Xbb2q`V=BgecEQXg)o{v89v z!||RzjJ8m_<~JIR?4%guHj3^0Kpv@sbi}oXzP9z#Bf}v&F5E<)ANP>{+b)u}Wb{;b zlb**YJ)Y4_t+w5y-}Z?j?tLSz<&`l(u=0jt6v5EdG(N=wCYO%!Lq!uU7`6lsE8;VrKS|$&Nqt zn_)zAK9h0nCz|$ll&-o?(60-9)KxJ_=_bD@%Vmgud;cW0h2zA{uFaO23iYSMRAkdb zN0}Tx9r;WzmvLh}p9|7GV)%p z{HFgpe^CR^C@Jy&pd{HpRK@molxU@b@DVC-7^V2aKeT~&f@T>{MeyaXwAJ$uNj8p< zi!ujn0w+mtbcm!s{iL|c&ve~%3Iv?~uy{TNr^Glhx%o3~JMxFze~i)MZSxSae1iOR zztQb2U356~2Mt(G0mnfu@XY5##fJ{^c`-)XeG}whGzItGO@a7{-}Gpphe8Iw(ty_| z$_g7M|Bpk&>HC@fOP`9ddti zbR8$NF9W1GZ;ZaT{Utk@AN0?d3vuPY$nxr6I%POYn>l_HSL<(5H2y|Gc@v~Jdn$UI zzmj;?SGs$58f-uRrb|oz(SlPwB*rsN7nXk^JBE{&3unUd#R&0=GkKgHCV}J;8WG^Y zm-T#DX53FUr@zw-15V_-PD6w9IPK8q#QEwm3j4(g&s7}Q{`U`k5gQ<(`p@JweHu0# z;()pA7=3xgiwp}+^nT&OHS1|G@Z~`4AQyK1n}YYNr(vfG59%X$Fh_t3p|^PPJns|9 
zWlqEVhhua}Y?xe?{u0maNg6!(i{{Djp;LN{!tMF+T3{C5HBW^`1P9vhb79e+NwPl9 z4bxsOtV^4ZM@%k8k9ja(n;Ql}+~^6J1?5FNC_6Yw>m50v=gomuZBDobj8bj>cT#WX zM&;0NIxxX748v1YWqeq_`4K07^o*OroSo0xl@CR*k zo{F|~ZX{ggfnD@WOxDi8#D}@4+0F&OF>WkAGC{BQ&ct>;eiYbq<7DL=^fFAf^~}ak z=Q-%w$PGKL`4}jk1^robur+%c(${gKVd*s7`@seKn5kHQZYCD4o(-~@j_!TDm|ew< z5|w$#VE8`0>JKF|`htIPBSmu>;>NjAojeQc)8=B;x^YVP;>9J-=};M;14Ai3nC$0* z|1%!+na_dK)G3(8!G*|1UiQoy4?L!GptpA(u0NQGH3yikTzN1c$%BO-W*|gw8Z1vu z!;V`sklf6$lQUDGRXU1G;p5li78a}ur%tGL_ znV^y77!jQTM;`Xs$Avh2cseABAD0fyhxHXvB-kv3JLe2&ec?w%?R1PHqqAB%C@oF8?m zOEGP(2!^>uQKmQ*nhDc!GiN522G4?Y2p?t_h@v)p8qDAEpo2}k6LOyp8QCRJ)}4!G zUJGzie>Ub_ngb3#Uc48Zha!iWaJ{|=^R~}~aOfNi&YS`BkvWJxzW`;w7_F6DSo>l+ z!Xjp1PAVTN1(=>b^I}dXKQzwr!Q$L>$S3f_PMr_y8P3k+&Oo-h0QCRN!gglg|86YA zD%Cj{85h8;E&Nb?JQuP`)8T({E_CgugDQk@F^3mt?6@IymIqVk&%o(rv+!DK27=!# z#MT5s@Ko@jX46#ctQNxKs>L{RZXs^+&V}AAL8Pki!$Wcbg4ao)a{D5z66eF>zYDP= zdKsKX#o&0DA5G2tSZTqJ>#Kxd%OQZ*y35eyAczZCh@YE;ks7!ZJx`aT?Zyh+R$7WX zTB5jSxfBLmQpmX}gyRsw$4|>~eE%}&SjH^u;ceUbZr*L<4SR?i&+5; zOG!jz3E<3eL7d?d$5s&$@cM{A)_OVKtyqVi=*5_CvJ_qEOEG_m0M_fTfW4Fmb}tsi z==Y@%)?J6b$rY%q5XMg|fntmpmWiyy{F5s%eq%XK$ce-1m>6!g%E4pRS}a*DhUSGL zC_S_a-R29iA!s3r>Xsq9cQMMPnfx4t@Ge3E*@8saJH?6qL~zU4ll#^m!jCjFe=3OpD(l&xAnxJym~3- z46MYwohz`Tl{8d<$vH`uTmw@Mi5L^YBeMCv3VTmZ>`d1@gNfb)IR)O!e2-f{t2ETJcnEF}( zK{?E(IG)N7Wh#uM{D6ZYIkaW>%xEc?T3MMBzk>v1+Ou4mhkq+!qnt z<`hO|trTX;D?qhD3TkpPc%Uqe8CNz!^x|5S-dl@g6>+$m%0YI{W*looabpryd2_8DzFt$?~ zyZl$h3gIiHF*R)s zW^LFAPqUR+x>f*x|4QJ*y;UeWy#oRBw&S20!F~3640!AVdo>tTJme7lNeXwnH-VS& z=peKasj}s(y$%DVEAYQU?mO}9l)o4Bbv6Z$J_c15UUizgqs8w zk4xiq&|0kBC5o9#gfR2@F38;24cTd8=;9DXsj>t_0{?@y-+G)0UyhmXB5(^kin@U{ z7&DNE(S9+A-(CTA=QZHiv>P6M(%?UK2s3mfaHCiOPXdJ?JbM$$`__Z=^cL9HoWRM} z?YRGQ8$SL~z}^SDkhEzvYO-X}X`uk#md*I$A&->B((rZPjh9n_sNpRz^^yZ;?nW%~ z*bbl9gsx^uq>b#tuK(EE)eEI@I({|$>=~~-yRfKjEymfsoufdA**Xa1Zie-mt=Rv3 zD}ryVLZ8zbBnltKg3o`gJdkc=V zilP0m6qN2S#`fCfSZT)aX0QhB_Hvl@MF@@Fa@crz9g5u8z0NY2a*UA2Y@m7y&}$}w zWKnULjI02A+z7v}uE53(l8~2Qi?}`OQ9FGTu8c3oqL*7SIDQuU*9qh5ZMM(j#dygp 
ziQ5^=vFeTpDxL`;r*b*gc?*D>**5RHB^a7q3Y8)e)clk{SEVH8cMIZ}!F+7GJr5P` zi@=?{5VtIt|CzlKh+Kk$o%0ZUe+Bbl3vqVg7W6$K^z2!Mp7rvu(wd94Q&yrSRuEe6 zR$+_#I>g>x3}a@qGJ-2m_G1ZDkIZ6mWjUm6g;*S)5A!w|G>k~0s9^yN^k*TVO%$#N z*uTl;7#bJHLT6#rn680>)k+M1Uk@zZgWLB-P<2TXeG*%d#MX^CFN&OdvN&WUjxyN| zP}Guy0H+LgsjP;m6#@77~m{VJGDmqgU|wb0qL2^(H)!{v8E=!g&p1#Lt^f+U_)ZNP42<19g*>nM(2*o8Y@ zn~=~eiz~&1`w@q5Z16ay#_VR#&98^yR~g)}J;?m~349!tgwVrX2r|+{f!1+wKC;H8 z$2%bAvJJ&?dr-7}FXTxU+4l~kBj+?iDh}bK`EINek%NM#3aWo^LP57YGRo!gq*)3p ze(c6-r`>q9c_+GZ|AXTkb?BFCBPQb@%Li2OPfH%n2Tx(q?=29WFN1LrMeOc6h0ibK zP_GNXCAh6W4>q0@lNHCHIb9ZYt4`ou<^kN#JqhjfqtGck03SW(18(m_ z{}N;5vkds)X=~hltA|ZVDnOk9E`PcV{uQ=xns0)h*?QO@WQV>gHRRXoLNCP;p;kJ$ zZEp;|PEE|Lxr&k?PxxMO!=LH55gT_M)#*1N)^G!t+PrYLBp5F&@50316}p)>V0Ot5 z;Z`mv6?4MSJ0C2tzmHu`PUvpAiE_mokZZpV)nDN#slN@!tRQ5*4a9-}!mzLS0X}vF z;J=em&)JJtN`<6bKq=SjFlUo zqWEGyy27%+-I0%A?kDi+%!1p#T+BWFh+(Jz-ulJRdQ=J9MFciHxQ|5x zKKS{cJ2ZH2!R*&Hym9fv-51yK)YuL24)@S9)eUEP-LWst3!&y7$aV8U>l$xl@_J(3 z0XKNY`ax936`$IDvDEW6<{3D{Fxwkv*IdK38b54|a>dqTF1Rq&59L<3AtLDyS{DGv z3laD?7L2Op0oeTUK2k@6u$wamU##MBnJWxJqLG-2d^ty zetNi-@p-Hh25~T z$PTr;obWip2McFif%Id0e4ptAn-g{r`{WO;UNd;F@I=FVH|XiPga3pxR`NQbRn`+V zzK)3EcEX40?hs__ivO^}qc~@L6L*Bf2{+tX;SH@h9{8!{2AwV^>`QmY2S0ZhzIVW8 zLk}42aL28UHkhXGh*Ew}$ZYY!(O*sbCOad&$hxaf^N0axy&%f;y|vOEq5C~>+T06Ys@>rm?hdCAXMC@7!ki#4+_G{!$~nx4Yx7r9Jr7+%a!9)9*xsY1m|g|EFcIjBt|gur_-coa`yQC9Vu^@ON37oB z2MbXzMC|m0@ELF1;~j^xi#MvLJqFbYjB0#KPQ~B^uxz7W|J*0=-=y$&Kehp zeY}K+49C~EI5WF)fLeq(!WEdExLf1-QD3BqxMHuhFOC?xqhzNm3QpNETVUVwXL!2f z39(z2=&N->yqYU!RJkL`%^s_FIYU6-3m<>EBSqO6efqZGo92R~BoAEmv_V0J3l@dD zp+VXMaXj|8c)$a$TitMOiwA1C&2X;D20=Vl=qP0OxjI2+p(pz1TSH^i0e7`MP_h3C zP87L8P|pbl+uhK#!T~mS9q{w22l)G~kUPZzSN?Os5B6On1y^Vta=}=r1>Ei2(8%Y4 zBSZG!(6NQ{DjS?)dv12uLu5F!=`WtxkYj;3OA848Fhyy-HPq@ZgY4b#-xMpDFS-ON zI|mex+2G9~cS!HH!D}CDuZBW&HQn9?O@SL$6s6uOcpE z(T6KI`urjueYt=sjVAc;moQy z1@#9EG4-$pF6irELhT&lU9_;l<}}WhX`oz38Ra)^v1pD4v_BeRNAU#|1sh<+{fn5* zr-f_AIym*>Jj!~`L-LF!Mr@3+B~}mn)@k6Cnh9>bQAZ2gXYDRTw}E#woU#B5RHl6Gm`YX8?z%rda;s 
zB9xEoWBK~i@DEYOpu0X&+RkFuK4l#BRKkBAO30pj4lfR!fWX)}gkRLfehozwzdwnU zHAV40+b(k^|> z_@%`9s#>ToG=q5RDOfL4ftjo}>ern{&pQnqJgkQkXV2o+D`gl)YGPTl3N-EXAepa< zq~oe631>2vRl$A^HS`};h4YXSrZ=b~enfTu*&u%W<5KHnHj41cl9`oxiul4dbRdCvw<=ZUhexYy#GdquBB0&R7z0aeC zO9NqZ&cLQj5vP`_BYcT6ny)D1l8Pc;wV%dPt~2;%tc^?C)$zem7k1repn3c}-g+I! z$LmKi#B6nQx(b3qRA4{j2o{JQ!Ea5b8wqvnN>xRNnJNq=w6SxS297h{|C47teo(}S zqzcmN*}8lh*x{lM-+LM;YSw{q?0NRPZ!MS?=_BBc0mfe|fxV;xt}^2a}Hb%#B6?ix>4C$UnRNpzsh3I4c^K+mrN-%t&jmFhy z@Qm&ANKgf?B^roh{^$O8WyEhk2XBVW{oN|8wo$?GUM4?L#{VBx#K|gSdABl-v+p>G zt72ZI4h%0TVvg{6^tCV^4=W+KTp40{8nBZxgzphe1mD(x+%8>AeR~GIjAoq_Gu%0Q z9xf+NAoZj=);(9lh9#PC{LJ*6sSaajRhadjgCLJ04z(S{k_>g+UV0h`IE{x>H1JkH z8LD$NkvIJ$vK3X(#i;`QH!9f5cN8l5XK=t+3ld%$II43R3m6ZA!MeC?bsBu9v>^Xo z5xW1Wqg`GTH_{aGF?4jMOHKnO-Dfet^mZ{x z8&50ru-Qig4e17O^U-2Gmh;#nri7`?&(9pt#$qpJnEg=49%*~{8>zwcxGu_4m2t;J z6I)DGA-IC!GL+f2uqGCs*1@qjJ@l&EA-+rpxq;`wd0!hfn>7%>`xKU*RskJR1lQ4X zkXUsNKgv$yJBJd(wmx#+s^D>(2F|4_!bwOG+Vw`LX1ve)d;;5awPC$d5skHa_#$=| zuDr)EXPq(zv^AkAql!^xkJh)8V3MMQ(Z}adBgcH&1?IDDNdK*&~FU> z`;HK3FvZ**mWXrKf{h-FjeZwUC}WHx>RQO~H-*P6b9A3F$Js4cSpIK@H5?YWpy~*I z7i&zPW)1l-&bT6FjNJz=!u$6X44$>c#Zhy#tGeLQTMMk;Wr26&Hkd!e^5be-4704D z@S7?6jZARn(j{b_(ZIk#OAK}}Ib5@W@w zwpAP5UYGDs%ovkq&d6HJ^0ii5NHw~{!{;JK*nP2=FTw886)3PeQr-MAJOnMFBjASD z4|OrQ&k&1+tRRu$fCmRGAh$yUnWEabsjG*X5!x_dvi&e=je_Tv(EoY~p&5pd;5EP} zxrSV~YGe0B9gLM&vUT(^D65KN-UiUFGzWLJ9?Dsa|9e9p^-A{8 z+hm02dY3S)V2Ru^0|*V9pu$WIF%L{pVQvOaJ{>l9!U%1FrchdN9$c#}QDtC*Ur#Nd z#quoc%H!DHXM}~!9{X(7psu6?Armd6>zqe{f-yoK8{yMAV>GrIVM>@fCU>Yo!df5G zigoZ~tv1#VsAJ670>92)giVVk7O62`Vrq_fD_yAQS-|(R4h}ptfp?fHYW?+~w8sdO z?dG^V>I_{YbI9>n;CiSwmOnDar%6MktuTg3ydH)lZ1Lln0s4wBLt&*JhAONfcuW&^ z>1z1xY6wp;BXsgxz|c<*yQNGZ+hBrOMyA-=eF1xq7{K3>)e?&>p_*cXIcC-vEVjU4 zx-C?uYT=C26_npKLvVl*4vV?sAM>*fE>~Es>xK;jm!Qw;u)l9y@L0?RT_)FXrNad; zWZa?N?~XG;uK1?q3WER#6gs*<{?8SdggW8Jd1pL3=zw)^)iiwhms!8W1?~~f_&4GMyTv|Gdf*1J2p1UjdE&?{7pNLKVu7(I9*cOw zE|Bec$O%eET@dj88q}hhyx6tlo}O%GjWb?m`9O@-<)B~e;tuV~?=DdFcf>F*0G53RDh9BxK`(dKuD(f$L 
zp~KD&QUl)5XLWy7y(b!Nt?;?s6J@c!kcsid2>~CRjP!!;85bNn!FpD#_fr0g`3@l$ z=>2iUZ$&$teD8s>Yc4o`$_KN@yzp4w2EEO0=$q<}Ie#2r!th}hVGox>p5VUcf)3V8 z@osSjpQ;z6$Ndmg;11Qlu2^*37dJhx;!BY;O#EH&GQ<~g-`r3U;|{RY;!^UVVxORvIgmk(+SykO3*nH}-PZ*x!dX!&B@?qHU~Ud74-*WkI) z5C4sOfREb;X1jfncfu9V9lS7ag&)q77ya0F1A-CEAH{lwG5GK<0?jS=aVq;R7JLhb)`c6$+YyAPYa*~jFaQ#hk$5Ygg7Dx3 zv}Yw_$=^U&4n)H-B^DWV2@v=dkNLSF$mfg21Bplo-HC#LYYa@DW}vAz3HzJk;phr@X)MMqqA`3X8t3eSk;ENVZALLpRPSXd2T%HY~rye zAq#@C8SvVl2kDqR1n!K-e_&;?cb#0UT1%2&js~hf5ET^)MB4xnr@fJqda8X>c=ofI68B9OMYa zGxZEOZjQmMRq^1iNr$<_L&V4zpfEKNJ1k0&YM6-v&J5f;`4D#!lW=NtJaQgqVE?hl z_>o%-zRf8ROvuB@#hLhhGXb}?OR&Q(1N^eF;Qssod5wwKwJ;l_9;GlSdxq-6nF!P_ zz?s@|9CeJu(UTRZvdu=+*~j4f{SYTr(-4@*boV|PQX07s*;j^>*J5z{dluAZCZl>r z64;9+V8)w)t*r0AJt-Tx{*SqP68QbPtsv`cN?v?Vqq`mmXo$^vC#D_XM&>%CK0y1l59Bcq5Yy z-kegb>}6*kO}X%?$-?7#*>L*%9MfY;p>0rv^6&YG^2o$it_R3}pNV*me4L1VhRJKi z7`UDZO}{F1+vFpxtODt_d3bcU5Vs}M5NT762S;Ck$NMGt_cbHkp&5Nw8&U03jX%d; zuyddSe6}yZ^i{R6K39#z$tU>!xe|9Li%@m29_5|&xW4rja))1{qq7RhR|^sSJ0G3T zDzWWUjU)=6>$7f3a!{m)cmc2=Ehv4n?Hlx^9n=-RN(owGSoCTLf}IVrYSzfituWL zpLm7xu_u^4T#BRaHF#rDj}q4^q&GD}wCf3;Sv*IDQZ3fZtB0m*5ri2Zo4ISTr?m)U zC-d=WX&Nk_JjI^Xm3Z1-45Q*o?D<`X!9jNJq*jUd1+Q_hmFay`7JE*q0`_V77_NAR zs_rsOOzFgtgIG-vEx-{X#;(9E;Uk{7)1}J`e4?Tg`SS?ovv9+CeoK+1& zqe6(@dV}odH}LUq!&RpaSoaNKuXi;jHou2LPdz3+wWE8u9XVOG2;*wNE&VF2$?pT# zt`7X}?L+MBZk$W0#;{Nai!mKAZ5)Ff<7tU{JDjCT;e5RT;=0|;zCS@X`!&*?%CPm> zYnCT9vh$Hn=>PqMgdL36CpGAie}=Twr#Qg>5}S%D(KJ|xcs4gM@5dV)d)9!g@Adc* z-UzGCW{d`wLh4O1^fKB|8&-!|Caq0)j-mDAbYi_=Oe8!Tse$Vsh8l(8pcT3 zS8#qC!L@H)NFMFPc*k1|r?Z)io;{ch`i13-`=EQSkIg0=#^-@=5N9+0mM!W+#^Lu! 
zxX)zvo6QPLd=LIN&vBlw7EWeAVW!cB%R4);!mbIahlde0@fA7`+aR9*3R9RI53t#S z{nc%V==+WxU%HXV{;zTF3--o-fLk$}@A;!2p-BUf_4t8QHj`@e`aVe3eZ~&GQ8amS zDEJzGhRNM_bT;-e>hgWK%chY#c_1 zS|9#oeZpq?3HzUo2AF$fYsW9Dr0)GsKad@N`mluse-m44k=AW_V`yaerJAwtF%w7w) z6pjv0;5*kJ%nba2eS#y9ZNDqqUQG**c8bYo;swE#*@1)MYc*j*p}5%XEdk z1w8Eitvm`Dd_&+>9)tP+IJ)k5EWiJ4uk4bf6sag_P)Ucwxlodnrb^1*d+#lxl(zQX zebU~0+eDJ=Rg^>s^*g`sA6}l<^W4vUpYuM~`#SIUeREyG9mYEz3(}ZUar(Sol%D@F zf{LmNQu|zK`XAR)uIE~E+atdliwzGNKXvOhHZbiqp7EmJSX)Ptx^8JU&ai7U*5~?$ zkLG?gwwINnrP9JwM^22+S|mc76@@5s{$=c^K4iQ!a=_T3N{EJ?{%hQRMTi>7{xN=O z^v5_#MT}ne?c&b|e;RkY3eeGo0#re1&{%Wrka6p9yK#JvIJL9uHTG`)VSN3RFm1!2 z@x3d3#;s@ij9+Gna2LnpbW5We0M$1$6MnRfi%Jodm#AvPu*H6ARik@^CG@c_MOdB_Hec&cZ zTG}H(&koAbI6r<2|L%v8BWcuYDXMZ&magM|Ck~Tk=zKXPx^$ZqUBY$p|C|)3>o&+z zeQB=c7biw{_6brIodM%pp2BqRRj!98EK1*<>oE45`_Fj&BoVr*P?Guwicon~u6@ik zpvUeIq_z{JsFayBom~yx)c?CCrr;tOVcAA3REnV>+F@w(}V9s zsX~?*jpVv@>cS$FjFF=STr*jqS&-V;iO~sndX2>> z=w|M*k}EDl>uqG|Ca#-#L2?4U9wjiwiam8e!3_b+wTq<0R;P^VUD+L}0y zuE-RoRsq8Fh_3{lYbZenp9#~qVRCdC_xC!&^|9Z&a9hVpHPU}&mg<0Hh=B6|)aFe9}<;l?IN8{*SjlafZy&?_bUIqKPZ%IfM*Wcd3 zHJ$g0QWYl5{R+hC1P!hgdPkK`*Be1^C2|cw-H}v?dspWcD$d4sq@0lt`}SsW_X?(2}D;E#v9AFDf+S5YJ83-^OdsjHK5>xn^~x3^n5V z=W(ecXxc4tt|KW$v-ow+1dH>}N6|?xGF&%WlD3SHp~c)6U>o;mXmVGilek|@QXbcG zUow`e@Od7)H-)|ynL*RJx9@uH!8)Red(Y``KeP^cx?`B@@cT`qRA9(BSA=`S$STk( zqp9>yh%(*rXaa4@97$()Y0!LAd79*{L>ui!a2;_~dV%}E1anV<@8XKI&tHtX6-!gQ zFWj>rUzhri=RPc!67*z=HZ3dV-ccbkv}%?-m9Lsi%ldTbZK_U1r)bc@FfB^sv}oWD zExJlnjT)X(qX$KeXvQrqTJvQARclbA^*^Ukcf%P}{{{Cao;igkD)6|ym!^d`)#-}~ z8g$?i_YpB)KyP2wrgONjgf;hNys%^{t@72NbGZlL$I;wVvurfoyJa4IK23?%t52lY z6*TFG3A3oM>UiqTJ>Yh1nZ`X*xK~W42E7-dNNM6!dQ(%KcBRdtQe4-5@>Q;V_+kv* z#C=9jY}KRI2D2Hl)~ZKrLK#Xu+U5HAtRH4+j|2u^C$QGWVVI^PNdI z=jhUR^4uFF)_||kB&xD&Dh+fTL#H{Jei#|6uq_uCg==J(p^zKV@T2^CD_m+av zK6Co<5~GLRrqjdAr_*(gjQe^SaIJj}x+`HJjhED;vt4G>nOz2S=K)iCewGmxDJ3*b ze`p1UxcdpB`x=uxVsFoFKL zJ&j&-F`#jW7tu+wlc=lYY?{mcGL8OeP|r)0K3J+v)dtMygY-#UBUF#>^V6d*HZP=# 
zXLV@mqXksSbT)MwVpPj;3eDvFb45s-&RtLGKDQb4$l$A(U|v7jadGj_o=m{xAf=JN0V)- zfr<_FzhF&o>29ENuj*4}%lUNptR9uVwuI`7S<&~V)9G{XIn+XaA$_=cE=^uDg`U|o zot}1_P20C>(c=#obzEyfH(i}dUAVV&eY6oxo?}dVzRaU1yA0{o@w)W#dlQ;?e?Hxw zXF{Lewxp|1&7xaQTT!QlMl{<^pJpF3qlxuv=vXsDy2#UnN~N06p4UsL_)!yTskMNH z4=<)21;$kKnic)ed=(Y`txqq_UQY8A=F_x?Ueu+P(elG~)b#oq`eNQ{I;6Osj$3X` zSM+S5x_Wl>Y==GV^0B1z{@K&%ec^OUj4jnxUqL_T*iwrpD``vjYWnNmS~|MckY*>Y zqhd`qv^!u4JwJ|6hYRNP&n`=PW4tZ>Z_7%m_h11nGo4R+EtmXn{1&~oR7zwG{Upuj zm7+~&G_R+7t99uJEkioQaVN5lGcc9rtc?jrX?hl>Th$PeRJ2+ zqk(4BdM=@wR*X)+u!w$--ayscHqwRX?WwwQB)y;DOkEWnsH4()+OTaSHQ4P%2NrIj z2Mrd{yI)t)s4+HFJjaKgy%|K`KXRgfzb&I5cP*xSBOK`|g|*Z@(~NpZucMl=iVNMS+*fxxI$$NenX#NM-4IQST8ycq z(|YQ}>}jx`8P$>ANT==JLUG8E%GPe6PQ@uS|9K*v?dwRlosXc$vR6?>*#!DkdNF-} zdI?=W5JBgNdeV~SCA4X)J-s$yL(}9Osr^$&+7s+V4NX?j>|t*@E<2o_7uZ6Va;&-@ zTtTlETGRNIJO&OH)UeZ@iu*g!^Y^-wLDtYtPcOIk~jg_Y(au_Xy1u+)rODJ4UNpPtl(Tu2F@+ zvvl0qL$odI8kOlhL*4EkrJW%UsD1Epx@GMZ+NXGd9#~gNt53e7)6JjI&bf{B-sZ3L z$@?PuwfYk+9rufSP5x^*hUn)fvG3|J_|G_q=EM8Y_hBc-svW@X z`Fp|ga6W<(>mroU{v5C-1@dhHT`}Y*dHtLjRUsus@WA`nDX*dzOV+ zpEIFRl@5L1bR>IiMVdk?4mNDVkwe>&FP{X7!30eDlnTK;X?Qb`jyK|2aD9=2GuN|` z?3s#-joa{YLmHMmNW;nVX~;aC1)1BKh^Wj&)}C~jyJsONAOqSGTanY1j`suEC=bZS z@0KihTJC`Fh@BYqJ`3vR+t7b86Mk2>AyRk;I=j=b!#N!@th10WmJV`f8&;a6;kRNY z)NIo6`*1wAP0z#z_jDwxr{ewVWN6py#66`Q7<(}lQqI|ExtW8qW9e8sZWlBL_F&bX z-FR>^8-35VBX7fu)k!JeJ#$B(NQ0`RlgJyODDw2WwvK#H{bT zu<}+mB+qO|$@raE5|V{OV|L(VW;O!vZG*^8Ifh|+AVcxV8LPi;gD&2-ZGz*_k zY{hIo_bmoIE@hbrxs;C0CF!UQ+=}a&88ErA4fC_p5%e<+;x~8TK1s#=x>TrdO2^)P z>1Zt4hRaUrs2;fk@^|@~rKIEauXMP-OM}DFH0Xq-p={MQs2LX1dmVn;pt|7j2IgNpR6Q|IT(Qj9=@339RU4lq1faRhh1Soh{#O9nU*-H zWxM(mzgG_-rcU`_yZ zG`*32E(|mG2Ef@l3f&(9p`YP}rE3Dv^wJxKo&37N9yn6y3#(PWIP2quy(aF6yyk^? 
z6>mIn^u&o}!C2KCi2rVTqWYFM^0K@z>0bac?)zdwryn%DeDS;23$iAWnCa$)<;t!g z{CI;&?pS`u7e7h^a4*IMH`fN^gtjZZQ#@g+AC72cFO1vn3x{@Z+)WR}&+;vZ4fKVL zoIiww{4uW339ncBqHwo6EW$jn&C~-4anbk_;*Ev&z7QYl2)A|K=pO5Z;y1yVDdYx2 zQBRB!cS3oPKMH$-VKCkY>P{}GFml6}i+o)7oiMx59RqD1=y&&mS-Cf?OI%=@?}~m| zXG}sN*zv^l5B93-!sU(!Dpr}H@m3IKN^e4*d@$xO3c~Hu1T@@=Mutrk zu8a#r&iy#Zj||3$(=iZSABOS`K5)-ofn2Rn$eFt%#V`zCCv3*1(aVtWGZ+(}hah&l z2SV@qV!E2`hrs28FN#wG5j-*+|EM`cu^};cIx(7@LJ>U})fSc{n2=0r4-EK$FnkZa03&irnQF!($3Y&t%kPsb% zm#-3`MLZ#D?FWy$77&jOLttVk?CnFaTpc$OE9d(+}zA{-Ab z*92@F7msga6LFjK*WZA6)VWQ7d#DEloSkuOpA(ez{7@_rfOlRNSUj{DiWdHue0&XF zy;O(HUmJKH;yG!!0b{SnVDw*(*I|E*yy}es#~4WG`{RLO1Vlp;(f8d4le!ae_C+)* zWn)2}$HK`u0FF5!$Sn%O>xxi>=p;bb#sh(>J24zDHG@&B7lG?xp(y_s0_l`^ghqs7Xjc%N{pTRZW)

Kxk+;RR&Fuwka$1Wv5xIYO+Qb!1y{)<52hA6l$3dgSN z!SLgJ`}aaLesHWlFo=Wncm92TuG5*JxbeghQqO{*xWpI#PVqR1N5R1=4x?`IJ*6WS z+n5jhCx>F@`~diGjK$_P(fBkk3@7%5;H+0Dekmm2y>lr3`bEHLLkQlO#KHMnEY@fT zV5OilmW>I-RJ&lDUl#yX{}4p*u|JInL`5ND1F3?F2$2vK2{CAM&won9=hl8*u-W`|K!x7sYiKkhd$L@tA zTrL`JmtF8iD+JqyLs0f79BP|_5x~bd)EbJhqio?aJ`5*IgD~oi5B%PS!j)sJne(e{ zr60`Q0-z}8g49-DlxX=P_I(KM&iBN~dBJ$3?ho8@MNNM+e(iF>-pgJHi}uIvm!UB1 z4M3i+19sW-by4?)_BMZfs0v2%8h>cXZ^1);?e+%Fn>x0Q+^}V{9~v#! zL;qnIK3wxfMtvY8GJ|3B#2;n@K}gi`L};)tM#uSJ&E{Z^pHR?!K4@|BfnbOq=Kl$U zx)I+$YyENOrZ?t|b%Vh(PgDf)y&)k0@ecxFnd}ASZhv^5^G9`NAUgYep~lyB*27Rt zEegclOn1C};)2mHLNPfv7?VWfc`fb<51SB7HV?+;!2q0h;PG#D!>dK15Wf}<`wt8% zF)^rr9fFDPlCjM_4DZiHV5vt4N>Uu*@-70AuRSp;ClK{MG5FjZfGKuin8Wd6eKs6x zIexaK#Uj`}1X|W%I5ImN&O011XF&o2hoZ2i_!Ikh+Zj%GhuMZ7n;`Qf0Q)!&$BgyI z8G$f-3EYfz0p7^^;DRNe%bBO|Y9#&H09{@)?7OZHlVw3ja1BJXQZQT`T_NIakCLg8 z2s94CWuqu;u}grnR4C>PgkaI-cwCG6&VB|3VEQpRM5_B?$@f_N;~=?0GQO#ReiFF$%l?_`*df99~DmaP?yZ zZZ~lbofZYnm>?)^3c!TxLGYgCg^ZUWyx;IcyId@UxARY zz8e9^YW2jctNyq&!wUy4d7^oIAll8`V7=EH9rX@~*b#&sLLRsox)PSh!f~|U1NIsL z2-1&)aj7@%Obx+--%fDibNA!9@=etfu3j$C`W+3CgK^l(Yt!CEu_&0~jdN=QVDd8> zJ$yfUs27IokHheKg%9NGOtAK9APj%`qx@S4uCMjQ0aY)&<(#rV-V6EjeBsG4_F?2^ z^gRtn(-IG;I)~t6LKrT_g+qRM5E}F%vD!2oH>YjEO4m44FAu<(wUOAg)g2RmMj+n8 z4`J{4+S&3N+dlv`2jd~x6an%y1ZGBFxFO<=1!1vJ8w|yx=t!)}3rDwk5SBj-g65V8 z7`Ox@UpXABOXIOY-3J>+N1}%Fa}i%B9i2#AkMM_eKp-R|T(Gae6JFi%7|r_tm9l8u zS`mRx-lItQxuKvW94Bul-~$p6&oOv(GtU)CjwfDkxz32d;)}^xe=iK;$D<(Sk^q(2 zanM{D0aaetzf$G#Fpb8Jmr;vas>7c zMMCaI0u)ZB10xgAJ2DYw+DW*!G#YJ7(jeE6fEO<#F^9jWb}#|1S}{1ZKLS(lB%(|& z0V9hd|5twoeU66)@2A#OBw$lqB38-9fOs{hDzsup|MOcn`T)F$vjI z645&<9{a{7VDNbad}bt|@nH-uf0Eg|7m3 ztwa*sB%RPAwhpU&0?~axn%8MDNZ8Kn%}e~eHepzt9EKdeFI0?A1a(fpbKb|QEK9%` z-Y+%ncfhOQAlL*%A|*H!qeR2-I4%Nf_pQYBoJ2TSMB=sIR>b%OV{~CWLKeGV){!_E zopy(4V<=WF=Q!LOkIS6T%y#%gNjVgsI0i>#MB@JFSS&jcgq*%`DCP2dJ-m);%nyRU zSP))Z3&R`Us&MIrV=EYf>?ad&z+R&|Hq$%c3Y zu64nm8$menX9mW`hoa6a5Vtv(yy2KyJQ#zY5}{as+X0_ey5Xj~8&Y{+r*0GvZ(b+c 
z{0)YEwJ)CVydobsH=GLsP4~gu5Bz!x5ulH@z=hXRLKR^!;=SvUo%V=ru!mqTucbM! z4a>P9E}zf6)DH`)0c>i!l#MaG-ZgfM==4E)4;fy(Hwx~Gbj=lL|SUbrB17VJsA@2Zv5#Iaf`at*0 zM%;hu4T-TEp+9#e)2m#@T$TsH_*@iBKh!d_(qc3QckxyTb^(>5c%%ND*O zYq0N>JH}65kEy>M;d5aFqRZA}y`BeVu=V)GG1t_x7B&;s!S2*L1RZz5tuq@C@q81e z=~&~h%d29<+z|r_oAV(FEyg?Z@3nxMR`)K&}E8$X|DtwYg zVSJzpj24Z;KU-xeuTnvk_C#n(tK$9xC1?$gMLNH(Q~oF<+*d%!RRuijl85U)WqiCP zk55e#vF6(-bd`)kl+Q@q%9h34xiVNBC68!bd59dB$F4+qEIciXkl*8wJwXmP#>zuS z{0|#vCkG)@MJ$b0#;B8{@Fz$LhbpD;V4fUa z^Y2PxV7U_JR)}KaI58YMErW+|1@U-?48&^XaYJ?#lr)raz)cCS6~yt%RTdLNr0_*q zj^7*Q^O-6NaRDJPCqaBQl!aNGEWA(2!{ez0mMxTne4{jGx5#qr3t~Z+IE;_WpmUQ7 zjvGqg-cvDLkPybN$BM8`P=@m?X?#jmK=n``+j2?@%BRJl!Oywlt|BhJmjx{lgOv-DhtOK;_#D|Mbtl=P09%_T1f4r#_*g87 zv8k2J>bN+rJ4>VdM=h%wD}+Urf*|{(U^y_%cH4=dJwO=u)(FDhT?EIj%0T|Q2-tLS z&TW!NpEeS2l0>n=X_%dOF9L(O0p_ypFSBnFMn{G$E>8Z%?oSrL)vwBE$Q6OZPgyh= zNP~z;aXgEmWZn~I<vK9-y*fwnP%NP8-Twsn7*X^=3MzZJ)?dU15` zki(39L(F2o2o}8+#!Q)EWtENwGBygrY)l_}wD3E-Am7gtmbbGb zO1;eLN_ligyX@bt$2!_cLetUberWm)V`^W<^QE%+uUeR%-u;8BFM5<7ht{ zkDpAYppRV-7lP7oE6X41XD{7x)2l+*elKFwD-s z?`Gd51hGX^6gNxwgZ6QM*@)*|Y>bj1Dn<@4w+#|_%|8#p5NiqUX4WcwY+O+{YhN(L z0)6^fMAsl2Ehr5~pRep*`Ve!y|C@zOlfwHdN$9Q>K-lblw&~Ab<{K!C&;Nd~?IA)a zUC_!jCk!#=Tf;0{bbzh5?`HbnT9{n5FdonU&c-a_dG+@voA5>u*%0UVviq1}?J%oJ z5yAJTl9*E=jzta)Ol6ZGJf?}_LZ}F)8wg_cHxUT+^|IS*TiJaGVoljFll|JmoPPAO zzGK3$K0eGE*Y&gTuL9Vo|DA2Q+r-welv6Pzbrnwm*uH`XPvGhNT1Qq#tgTyyQbfnkMnm{p&$a`l@hqQMi|$H zy4X?gpDg`u57XGw#^eRW(DJ>HDUB4xQ#oN?pK&~m5jZGfvW4Z_2;o3f2eZ)=N2$kewz#{ErSo{$pMK2(lImE7OEH_Z=R13S z`5znT`@?2@5=4^k59VDifXTaun3sbv`a1-nX*bM1ANJcUkEw#g>leG2nHg3tZro=lQ})aX6zYY^DBp0W^E5UzE~71 zAJ#DK72nw<9Z@XL{myz`DdIHe@`|yNaR2y~sTlOKf9E7Hx&0qgUnGJp&jrx=NCY_? zS7Az`P%s;frxJhIgY(jOr_{r|*YvQ6jl=BtZV`-}s0hI)|JY6&33zZm93CkF&9ol2 zS5X+DZ$xk^K^X56e=(h3J?ziHPB!#Q0%fVgjNSRcTtf$#NuD5r+y>Z(J^k#9(hv)? 
z<#;F+K*VS%B>Mek3cm)K#%2NRU;Bq$?fA|%J^R6q$^T*^wk=G*f0(s=7RJ(A0sdTU zkeQY67@rqJ#2#UYujynh$9tF%`OS1SzOrjkgUo!vKlVVE=i!1uc8TZD+qr_MZF|ma zD~j07RRX9R+0U@1nMrC0U`BWs`>}mI3YN=2!eb1K%LHJxUk*!$M?j}l3L|SoaZFtr zN5_i7-9ZLlW{AK&P6ToK((o%5ME*0LPqKg6?w&4YcTEJwPNJ~+{g<7o`^~~OiDI)l z-$QN)V#iStti1Y-oqpZHK2IECKcj@OU{NJg6C7ZTzlBljKE#F}b}=mnA?$ip#DXIF z*}xV7xL0>Eg{mSpqr8pX{w#)AkLAJ!N1hgtj)LyXg3cH2%In7_k><8K7SDf3uzOje=wNKVx*6`;g+tS1LFOHRnA04bpV+0}$e>3^6es*tE7ppkh z&un`IP+#|hrQZ6*lArc4f58go{<(?0e)2;+b_nli+oa-z_m%n>4Zr1u@v< ziNVlqBr2i=vFMio7SwdJz?1E)@md$_92#a3b&`lt6a`fqVqZ>6VyH+NMq?Gw-7f{{ zr6bXHUL4bGC1G+#3I-A*@b<70j^34n=PD8OvO#wZ4BRJ=gLjh#E-ssfN1Y-_Fj4`PABz#Cvv5#$BCl5_q5bf5oVhd+ z;fE%}ze5|PmyBW5XN7ttO{8zoLu#BcI<6RCzVd3EJFx(3pUuMUw|aQ^9v5TCvhd#-LmPOCX?7~7&`a3$`2TMF49tMDMk z0f#C!B4nNi=55=Eg|nEeBMPy4pNJPVoU7vtT*>0tY2LGRF9WXM=RTFnqDB`a`XtQDT^Tnfbl z%aHPOA>;=v;Ivd8@4V;0Eo(l?-RD9nOb3&`0XqXtvE-={PPQ$=lgks)dff`rRyOd) zN}R5kg*WbcXj7ez>>uOsU$-7yT&*#($_m#60rijbU~yUxg69|FR+TXl7SDl6!#q4P zoQ|E|>##dl5vyk_L4Z!d_Yg%SnkwPO=`m2Worn5VE3`x}LD}v_SX#OmS!s)4s<|BI zA{;vcMyO@0F>C|qtg(Yd$$U6{Ho)vCU33bp0EDb@Ync^dc&yIc91E9g#u(={8|##< zp*=wlw~x+4x4kBwx6Q`dkCr&{#*+V=(|SBhvBtYAdT76C4uxm7FmEu!DPLgpF@{DD zYrI;w6l*#)FmIJL#)Paza==n_bNtCW5QwNRfo#t_u#XH2UaZFFn^t%?WiDLCu7yL0 zH5SSmAg+ty{OR@hes~%3EI8ioSi-`Ype|)O9!i;GNZtynUgkI^%bz#jG{(ZaCOEoq zF2W~K-1`e8_gdnfyD=)<7vQA26~v5<;n%wuFB>So?>HalzRboiXH)EbV~s(!6kUbZ zn2>3K&-=_^QN13uQ_LaeLU2-hC4vqufpoGF##S3Z#%3XEPU^wA(FBJI%(0~xcohQJ zYg(W}8T@&cDW2^zh4@ZmejnNzHFXPdTZ}?x6pybi!TBkRF>jL*el;yZ$*>vx=ITLZ zB%mT|h~RUJ5yh4uIno3l&QfUK0mk1m#}wHGu*{(-ePxOB9RzPTTcY8*Ij(2Tf~2iJ zmfe^Qkuj^0#mphqZ;2bGE6@~WgBxmQ2pG`9HiaepUM$5&LlZ==HiSd=LIkADLSlyw z%8O^Y*qPKv>}14!rrCd+8Ma<#^EN+V4`a`;?}|6r1@G(ZbNVx;GwB+uT=|Nf ztj=S%m)v9j37%tqlg}}2dXmljdy$>Gxt+DF-^sLpZDlK;MX{=*HtbIC5_aLBDO*L& zScZien=)oDyW>q*#qfN#F?S(LEihvcBuuu`guS*jVa*FHScH@b>vdYnWd1W@0b$Eo z+9z{1&eViiu35?Uy;#K>_OD?x+aFOqWuZ%O-lsJoxYWY`z15&@=zA>UkZCHoXqMUyEEdE!WLXkVdeLt zS?S3ncBx}GD;S-^@&&iEQ8_!=j%6wAy300}wl0_jW~8$RJxR=yKc 
zEv}1Y`=4)R=jygHyRIzuqIwS-las?_40f^O$Bwen{#2IIyqjHX%wR5cam>OZniWXz zV&W~y?8UV0Y++>v`=qg*_3q!sd%8?!a4(tZq$RO)x5HS=x)fF^kjh$%nX62piBggWH zz+LQS+!=OPZa2#|&0yVm+t~KpU98e2jg3{vX3G`!vl%i;tkxxysR7?X&E-*J1X2-dSdQcsJ|O+spcOF0#Ap z4zl@+jxy(CC)kNgM_8ivIc89Hh6$hA&mN9G%ED{6vs0#L*tqmFEXL#tbMHLE3KQ1PWd%}KSoWw3%T)V;YzV2s9SFbYb&NpnO z?j2^W@syqYaECoycZ2DrUtr@5PP6#mcbQJ@1!gt!Dto#0Hgkx$#m;Yhz(yau#+>O* z7V`WFn;!R&E!dXFR3slUC7a9a-2O|fe9t+SnSFtM-FJ>jzd6flzg}aHZ2n`(pU$wU zf&V|QBTU)(G&}icFB8u_!ftAuXFcl=uw%t1SjOMOZ2Y<1?Ah6^Ed50qOWv`HnLS>` zIwHcD=4~IQzH%|k^;yAc>^8BfA{K1urzwlGv1Da4jaZhY8M~gR!*-?2V7eP-v89Wr zvJF$lG4b#Tj7866!JnqHm3vg!_a&3qq{PWgD_N4I{8VEVe#&g+jxlUfxF(zZR+d#Q zRAc*gPi1+cYE0i-gXK7kVQ*FCSl6pzQzPddQxCm9(+h!rO)D&gnCk8>(+%d`rhlfl zn+n+%njXk1H}%|8ZCV`o%5>HD0@D@Jd8RG`XH8EI9yN_rK4+Sff8Nx>_l#-e;11Kc z1-}Se_k-jWe3!t5Q7>^9>>@jF3K*H%_LJf917v2qn2|)3pwW&VVWa$5 zA)~e{0!CNIiW?OdiW@1niWuEY5i(l4M%d^;vjF!n6EgC+F2H^1g^b8b{`--j(c2tR zqfJwVjP5NLHtGlyGK!ryOqO02Hd3D^VD!elha3|7Nt_a!$wl)oWW7iW8J^Hgtil_K z-H{gZ)u^6assBpSrE18nvGt@ax{k#3Hjutk&BQObk#t_JC-)p{NK17iN$dYYPCGXc zg*}~Q(%l*&)!s}#MYIy3yY1xLrg}0;yqcV!_mvFa`AW)2BiVJKo0PxmA{WEH6aUO_ zq_C`uWPa%)n{v;pjVW5i)JAEfFv)hP~?Jv^j)<7((I>?1j4J2Bwp5!^zkRH7{ z5^hvY;?+Kp)!sE^%CgVoj&m)^X#Y%Jys9HI(lzAz%sO&wZwraKR!0&->WS0eYVsqm zj(DV163s(ZWT8qGIdi+3EDHTZ#_#$>T7TD)*)Pkv?n=hnj zMh)3DR89WfsV2sI8$c%Q2#ALrDO=gYs4p2bZ>A?gcJ9Z^euO>HBOv>VB$uFquE zttO)VqJ;?2X7YS~Gs!vfm3;H6CsD2SWP@2VsZnku-On0G+v65elhZ_|S~d~gPi;i6 zxt288*OBqL4MaGrkqiztkce+hq(%4(>9|@)rb>Pxy7F!0{--8#X=5w-^09>oyVMb_ zCvD`v?gp}UX&oV3+ex)eBk7fHBoEpf$>pkgqAT4(Tx?p&F{@@0pVCT3`?iqR@r|TD zteyldY9=eSo5^5C6M52BPu7UElQG<&qGUl6F@MuQ0$tk3oD0=tZd@%9+Fnnzk2Mj) zj7HLuQAbvdXe3$pTZzo%W+Lj|LRRUvl0|MUL~LaTnSZIC{6Z6v{rr_&GH4*VKX@MO zts_@ox06@L+KCE3Pmb(YV&~98_K#{H|5V$^Tdyyq?!qZE5vHh|k5MrkMyCHxuF4O=P@Y8&TNOLB1VtA?!shk?e0E``3RZ z50^BN2I&Tpy0eMsoNXl*94Fs;T8OK2Gij9VActxjhzlQktbHRq`7^oP*-U&=8;Qq~2I950nao() zN~#2!NThi^89l9ubn)1m?*2yZy!%Yf>NOKXgL<-5vx%5)ZzQ5KTF8sRI&#>ej-;M$ zCHk^8%*JvXjZ`G0!Ej1*|_zT(7*htKe 
z^IYa@_GxV`>EUBvCDKTQrq`1_>)MF9V;#vlT1OsSX(dvA)x_^^3#n9UBIT8}>yL5QYSk@6y$xlSm>LXcfT}_-HbDTY@BpIHZLtl#+X$)79*AXq` z_va6!&-f!5-&H~m1XYlvn?*!pLKQh`QAU8tt894K9D=V%E`|+^~Cr12jZ{!nVc!AAV#Plqxg7^ zw3ZQdo(qcFLPjcI%E?rjGV<|71zBEFN;Yr#M1n3BkQeb4Wc;BIMBBTTObD+cDmlgE z^-wwaJgSm-J}o4ccPq))ykc_h{zuZ_P)vIB3(3Hw0`kk^0}0{G{cCwGu{-&Jw63ot z$Ilj$`pl0cq@aXc4*fvLiFf2dM={yIx}4Mmm62BU3ewbDOn%sYAU?-GlC4EmWZ%RR zVmqOjBwi^c!{-V~@FhOxuv&8dERSD71@V=rBw~XfNSoa!lJT;X#9w(wiX@B3xW6CB zl(+B6!^?#vuIN4K^C~1;or=g@?RR9Ge?IxBUqXBX3y9Xqd~(XRfE?rR3Az-LgY~aS zp4dB*-&#a$$8?jZ*A-;;$Vw8WTtRZDmXiYg_he7iXVT7NQsh)g;=L+K>&^}`kLRk6 zOA*mI_JNdkmXa^)ONp;VIoYXINmx%IIXPHDp4}@WpY9cs*8VcGfX^j~m5^PQ1*B?2 zAz9a2Lat^O6Pe$|Bs8UjOhPHqRr*3Q&KD8c#8M(gOUYDLL~@oFlfWBg#8$S9jM`B_ z+&jxi;nBilgTT}h;>gs5eX?E%l8%& z`DI09sci|FakP|7zE?^P?kgpe`bvq3L@|+)%qMjpOUSu@AINowcVr%4BfF{sVqero zf}eaQ?VgO@&1Ieko}$EaSOYMxNUgl5*!# zqIe{q{I?>X^zgB}CX|!@v&BTC?>*VQp@fX#@zPpbK_V^`kc=_!NM2wO=hPz7dbfxq z>%1o$ca@McYm3N#JWksC%1HR}a#CJiLR8ud$dQZ1(NsccdTfUW& z6=%wcL`Na{^YR1ncv?gbohcwsm5WK({X!xnQ%r6xDj`A#ONrgl4@5$~lKk@eNb2Vo z5i5R7Q1Ay5uqB`9b8dUFvV`m%eoxrM3X(XXlCupi}wyeT4Pn@Y*5yQM^RVKGtRnE8*tc62Ku#RH{e+%mqtIv+@GNeS7i zT1MVUl@erB5Fv+h^0BalRJoTCq|6fPf8P(L*L{&gh zdhZYdgak+kz0HoISg`Qyf)y*+1pxsS=|#Yb1q=4B&-&~YJG~|#y@L&~i;CY_-;cXO z67IQkX7=ovJuAtj1q^3186INo%V>pqIpsgjBNwMa5XczNVoJ7wmq;prX?GEozpA9T=`!+LCnYbA0mOr=>EXu;T05(pq%Fy?swT^Vf`71Quy#$Bh%$Yz0*hPD@y5yOV*r&7vM zVfoTnOpB6=$f2Q}E()0su9nbchSRMK2lbsY;?&D1EU|+2KB}aPEDt{|ETfUkw(77v z+B{84*={B5uCygI=~E?%6v}8Z^KBQa6HC>KDSfGo^exIs{G^=H*f}|0OKEsT5pA!l zr0JV0Dd6rK+V!iDiaMn9UsE;73>bGYOjfv-Q}wHI`g^UIa^NQlC>bf%W_6vZZ7Y#`uWU?339tntG&)CjV1G?p4)vGpCMj*VfaC^JTO# zgJJ1jEfq|tq&vn^a^}|3=D$VsBUDNSC(1~FdljXaO6k$lYMK{PO+O!0Q|tapYSJ&E zW~)-#6e6XVUs4iJWPBl(()*K@6xvfx1uXA|46CMPO0Bf^nv5Dtt0=FplH8JIq`98; zEv&ClR;(mfi+WPdtfA{W8fY4O+-O%%CoL=J=E73SV*cfgs-_u(%c$^6HPf{9R5qxJ z2Ft2xv%HL)S^k9k)X{v-8#))v^wE??x|<}Uy{eV8`${dfpJg@2{0-f(X`l>Nha)z! 
z{^nyXB{7|qF|wA#2icy>q%@|qf=)G8(xHMv^7pKv78ZlAOsBmqtRkPmRn$;kL)EPR zn&H$ypGG#5MRE<Q^}h&X-cor$&01$MW_>1%1x0p$#3?6g#ep0-qJr9M-En6Op9p?h>n=)qx@7c=U}BE6KFI;Av!m5e+Xcbl$YYwyUXkexCb5+|jw3uWXxy^O+6 zNlB(%O8rqXy7BxCWzUt;=L6-Wap5K9Y$zjFR#UW>7tjmFzeO#@bZdPvDfP;zdwDsX zU>Ho>TSdIae7X}?M6WKF(gdYSx^cFQl+*Jmdtp7L*p*S+ch<`$l+bwA7yimFqYP0M zW$vz^NBbH6f0fYam*p%ki|OFpDiW}mtiHhZ%+~SzR!Nc5-w?I1K4g!KeqJi2lkHVB z>v;{iK9EsX3F|)?mit~6(%xw$SzV|#5K~1l1j>UsGtq)m1M!<_~&W`ZE`E4 z=lg2u0mI7d$O_u8TSE`uSJ6GyTDrpe0qHRo?>s3jtgRv4rERo4yp9}|s_1+}4Jj{^ z(%7jjWPGrZX8YHXl6gCA>#Ct)CJn^h)<|y74K(#q3%$r}Ci9*;>R@zFr7uZ4-?sYW!d=q6ntfN$MBb}^mpxBx9R4#6% zao5_YsK1t~6Pifhn(2@uEi`0#BXxK&&Ej20m)_RV{c{Z@OlTn2zV~!lt&Qrt7=C$8 zv`oK-p5LvfI(w#Z=d{w5sSV`&yp6^meM`Z=>uG*Q6LH%cX(!YDwF4Vyt$z)b3hHUb zzfCkUs+Hcdd~GXjp~q%5Bz{;+wLNd>u6G07S8XSa;4V71rG`c`4Q4UFnZ{jeqs7{t zbd6~w`-}#fc!=r9&CHHjb(GEY@zAwxG=Fy^$$Qn2o?|V&3hbmAcFojR-9YR0Tc~gr zo53)h@6CGb57sTTCc2I$|B;f-u{RWM)J(&#H&PVSj}FyURMx@f5vi@LhJT?+?A#4u zHI&9?3y-Qg>9}4!xee{4rB3fzfBcT|&RYtNY^5p|t1w;*sX5e9f(f%V?G1gN)IbxQ zJE&uD8~J#*P_kF9m@ck&ZK0({>)C9hgq(;iks-q0%%N+k&Ixx7EPAE1}m_;Lf{#-|W6)hycvYpNxXPkGlne=Y4 znTPlt`8BoBv-@nWBWtI2rr#p3c2MEYPI69arA)haQmtrU`l^wR^)rmjYa`z?Ebc3M zX~&E%ieR(2Yv^F}l~xKm(?fRynyH;>Y4=xeY0KaiIzFe14$XW^v$xgL)H6*aH?xyg z7PgZ`Z!e9EZza>dPC7fOiKb|Fk*>iIaL&CY^8ZN1UhnAigtug@-a#+6exSiyo2dF+ zD|y&FT|2E2LZY8TzO%ybEe#Jlv2rN3?sW}{g`i*eW;5Zr?u01htITb&L=kO z`$SvPKGX7oZ?tpNH~RPcXR6Y8PwUuhY2oP)G+na=>2_uo+5Jz-O|$`H@nsKaxw@5BhlHE4h1qp#ipE z=?8#KQ#BBzjO(|DF5^jB<=c5 zF>BfEH9`)*;}wvt$-dzqFbK70%JDo0{ap&N>&a-OT02HorYp%>LC0MQo^$X>IizNjEMh~5MZr= ziJMh$*V6#^`n6&6SQX0lDzKZNfDtuH&>EqJ?Tw1q5v7JS!B7N>wV_+9iVKd4NbgdD z^+{#?_E$p;M-wahm0|OF7=kw_V>mnZgsNhTx-zoPC}Y4SWrzz^;d;at83)v0u|)|Z z+4;?5hoQV<7%p#8LisF3WJ{G1U!;WYjVdrOP{WIr8raU)M%PX?L`P{L;-Dg~`zk|o zgCY*PC}W42I`*?Y+<9V%(xd7KX)?sAMe69>&-OS=4XS^XQIoESu=5%iuu~5{FLaQg zrGn@wT3Do}0}n$D?D%4U)+}|{4pYVv4KzD|6H^f21merW&wPP({xpedyiN$DV1bc%z^LugNMH?5_eBc`evK z(nNQv2KLU@!(D$(`1WWc{HHcP#i--OLOs;IHN^BWdWgx?#v5jn*-8Vn_ZdK-tqkE) 
zZ3NjGR9noAMZl6P*7lqkBba3$-)TF60~vNMGyVQ_2KnO2XZ1q7AGC-2-m^|1AUD9q62q- zZ4CUViHkqAux6VLp2r$tdzlVeopf+5MHk(28qf*T#%*I=+?rzodo^Pyv3(BcF+}J- zZOC$rFfmvg%Hwp=;-HPN!=@M+ZjL*D&5_e!j$z?;IPi}RltnzuNU=ckUbdG@hVXf0 zjHQ#g_!4e_D|d_#{lEzEGc0lTEC)G5EHL~(GX&mZYZ)0}=Sv-=>RQ4roQu9r3wTX2 z#ro$Ql%3Ou`T}Eg*IFPT(i(?-Ob{GliQHd&)MxPUA(RWlhZeXV&xfXtDGurzL-~q1 z^5*Gcq?tZUM{}U&V}#yMmbk%uJDJ&g<*6~|+%?3C2s2oX=YVTvj8)h5kgvhPfKx{J z<8OrFFHKb(S7DfR?)@4e@rpsiYYh>rWl~dfyI17DEM-~ zt22lDVso6B!o%GPE}|xw;8=|*db`cB_mmz&GC2qiGC|b8dMKK&ho`lg@MC)&^w=2L zuZ{7;TMrUjBg~64!})Dy7+r0N#JlD&km%r8n-R`i>%lWfAB#sA!+W|FzFS#i%XKS^ zE;Yos#~f^UYlZ1i21v5D05+Ln_BIntTxJfkwZO1Y3-~dtX`B(@Z=x-F9F6dOrWKwX z<=~q;^X)4>BK%Ad$Th;EqlQpFWQ;*4`B2`;Y-qQE&qWSW4j5pRDGxF7X1G*l27{F* zC>mvjr`BdT7sQ2aFb@F|c3d@qz@CF&&v@|u#lfa02AEcAiQhw*EfX#AXNMKSorTDq zY=+zO`EdSiio=dPD1EiY*B{1cnPZ9l`;1XL$p$~Z^P!{5!^uektSPs|=zsah`oe=| zpfjFzn8JXqF)+{=oo6i&F~AndiXx~zumtaqEW%j3*R+9&28+*ZGwka% zgl88QMGv^}xM6~HH6GTSvBe)XKCHwX>~FWknlv*6S#jY%$OM^>4Y4uG5pUBhvBAI& z*5y1{#Th}_!vX_mGi))tx&j4|Wmuv=!vf!OOz;erD2TSkl@~m0Imtn0v=Es_X6SA* zg{qq!77w<7`M*4t6DDvRV2%g5W(Y|(#cqx>9%>t5%PVV4PcVnV98+9pKEGY#09_9w zL@njuWuy(peG@}dgNqHdJh-KqBX^JhF-xrRi*JdNS|gZqO%T8rq2I|0(bYT*n&F6H z*DW!Ai5=q2tuXDA8T=;jpb;p-w>=`9++>Yq_ib^2y>{Bl4#~?5;dPgf_cBxX^YoB$ zkzs8Z2j5@$)qoNo;>1dwAI1Y>&1OS8RM|iGSu=!{@3!YOb^Y8*huOeF6+V=m57c2gJ4w zhjxh&X^|qNHQFKXr3dC@*;$ zHC(jp@n;PWwN5n zljex<$d#I7cp`)gzz0=4Jp?V1_zvwdcp}y;S8&90?1u)1!trqW{$H#U!D*jyabpu zP5^&v8_cnD!EQx+41HpSKfj%!#I?iK9ydf4JAf+`UcNEx!NG@f+ZgB7ouyO1K!A=|?aD^|Eb ziJh-&X@`YBZE$6T0}@Rfp)^*2o?EWi8E1jRo9&@I*Ae>RA}sjGK|bs2x^HukaNY_A zY+aRPJ?OfaqWvbz^&J-QJnn&@AJ)j8#e+qPC3+57!F9X<*ZyOGn) zsWQIIvw;iCw^hHLpvo6O;j}eAzvN)V2LVd=3h;fb9e#GPTp|(N{G1^29f1q4J+VO~ zLR*(DW({}5h?$o7Hq9KfbRAGpV}YrSJVf^habd1KjyPH1Po5nftg~TbNOt`^VtinO z6PLU(zQYF68aGUEvcvMD&Mn*Xw*b)Oht#RX; z5WlYQ@mj?Z$`h^7!Ej>FwFZ}QQOk~T*b^W|G-i(HnDU-?KsbS|O^Bd&0%W0a49fSo~3drT=*0$sr-m zF6M*A*rH>+5C`5k!hNVCR(!QW z;zM<$2ku|+#?GTo*lOsEv94|iRdmOE#wV@6S)Syv^-he0?lmva02idqasoe7f>1{{ 
zXg~JEng5(|QceWb)l6%d4oA5Iq5YH)N4|{2uO>GXYl*SU&;|N$*>&eh@anuPcJv9c zafl5rj2(q~6=%FSe7(-8wMRKwe0yNxk`L8_&@ASo`su8$U=Z%*U&iK&JhaAIjq>lq0 z9I{8GjPZAeJ$`N-hvrpd@GM9KX`Ki``<*c$dNw3pzyNPn|JE|jR+$Ej@`S8(407Jv zL0zBK%Scyr4xfa90ypS5P6C&482YDR&|Z6px+O3xa)aojCt|11#P4t)u-ii;GV&jc z{WJy#M!2G2lm~bZC*XP2XdL7CASJ^c*OjMY&vIW}KQ|h3t46@r+Xn&bC*Z7X4C-r(8wBNVD=}Sr5&n!>2%YFth(+U0IA({!`&KXC1O~10bF|ABUoYFl}5AOhx`Ue|QPqbYvH(MJSHM+n8G88-GZtyg3_U z<2fkWx*E$|<{>6_F8;&?!rEXN;{4WOUg>P4`i8R~wN1m$S3xLPHy`qxC8!=7i?rQ| zXgZw4YFi5Cc&Fmzw?tHa+y>X=6sF0y;+a)Ca*H#ucI7UNJ(~=LT?ybGjm4#|B&^bj zgMVovRvu6L|2mVlm`k48*(B@ashi z?q)<{K;LF~C&XdSv-N1PTaMogWAS}hIM%ETMtOGv-VJ2uELjKtg~3>s6phZbXzY-! zhom_awux)8FDo8fEfkUmEaqdSbUJN}65jPgsG%^vlEd?qA zQXpwfz@3r=jCz?4m8r3~ti2U>yOOYEU?!3T`{D1I4(EV)Hm68}3nvZ-CMRKcej+}E zg=6)LSWFz0h?*N)u(v-7TKX|K{Wum+^AoVSEfK@j*2CmkI5^pfIHbA}2J4a$7nO$H z!dM)c6pYku8Mx3JjByR?aDG$-DqpRGN>(_|+9crIxj58*&cu3tD&7U8Bm8M1EMKPJ z@QpaA++Bn6n_F;uXbke3lQ6I;1y8pmL1)x9bQo_%S!+D@+9e?Ray-Trq=DTV9QJ{c zh*jH&w$P2Z_C67dE~Mk>zBpX&k3gt;G%^d~vFu11PJIbMZ$v!iyv;&$YYJli97M|V zcz88!hHGCs-l`|!hHC`2UP@#iNKL~coebPw5s%GVV$eHq8zwA_#N>`m@U4$RU|bju ze~w4dws^Ri#KFot1^FAdeQ0h&DihDW^OpM2<{AsF>& zGvrPtVq#b@)Rv{-#?%mG-A=&-{jJ!evl;Id(=mN^0&XS7q1!ha)3=7f*fbt>>^gnt z6JWGB6+7;1Msb;q@CET}hDi$NeMWb-vSd)Snxx>Ss)W;EDCtg9ms~leBN4gkNY)k` zNtW~MC1=%5B<-1&k~}|-BqG90V)e&W@?)=~WJ}CHl3AZzC8x98B}czZkQgocNAf?i Cs)(on literal 0 HcmV?d00001 diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/__init__.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/test.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/test.py new file mode 100644 index 0000000..15747c4 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/HI_create/test.py @@ -0,0 +1,13 @@ +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt + +#数据导入 
+HI_merge_data=np.load("HI_merge_data.npy") +HI_merge_data=HI_merge_data[0:1250,1] +print(HI_merge_data.shape) +print(HI_merge_data) +plt.plot(HI_merge_data) +plt.show() + + diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM.py new file mode 100644 index 0000000..e27ef40 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM.py @@ -0,0 +1,175 @@ +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt +from model.LSTM.before.LSTM_realize_self import LSTM_realize + +# 数据读入 +# HI_merge_data = np.load("../HI_create/HI_merge_data.npy") +# # 去除掉退化特征不明显前面的点 +# HI_merge_data = HI_merge_data[0:1250, 1] +# print(HI_merge_data) +# print(HI_merge_data.shape) +# plt.plot(HI_merge_data) +# plt.show() +# (dims,)=HI_merge_data.shape +# # 将其分成重叠采样状态-滑动窗口函数 +# #train_data =np.lib.stride_stricks.sliding_window_view(HI_merge_data, 3) +# # train_data =np.lib.stride_stricks.as_strided(HI_merge_data,(1240,10),(10,)) +# +# train_data=np.empty(shape=[49,1200]) +# train_label=np.empty(shape=[1,1200]) +# predict_data=np.empty(shape=[50,1200]) +# z=0 +# for dim in range(dims): +# +# if dim+1200>=dims: +# break +# predict_data[dim]=HI_merge_data[dim:dim+1200] +# +# if dim+1200==dims-1: +# train_label=HI_merge_data[dim:dim+1200] +# else: +# train_data[dim] = HI_merge_data[dim:dim + 1200] +# +# print(train_data.shape) +# print(train_data) +# print(train_label.shape) +# print(train_label) +# +# print(predict_data.shape) +# print(predict_data) +# # +# np.save("../data/trian_data/train_data.npy", train_data) +# np.save("../data/trian_data/train_label.npy", train_label) +# np.save("../data/trian_data/predict_data.npy",predict_data) +# train_label= + + +# 处理好的trian_data的读取 +train_data = np.load("../data/trian_data/train_data.npy") # (49,1200) +train_label = np.load("../data/trian_data/train_label.npy") # (1200,) +predict_data = 
np.load("../data/trian_data/predict_data.npy") # (50,1200) +# train_label = np.expand_dims(train_label, axis=0) +# # 数据处理 - 分成一个epoch训练240次 +filter_num = 600 +dims = 49 +batch_size = 1 + +train_data = np.reshape(train_data, [dims, filter_num, -1]) # (49,5,240) +train_data = np.transpose(train_data, [2, 0, 1]) # (240,49,5) + +predict_data = np.reshape(predict_data, [dims + 1, filter_num, -1]) # (50,5,240) +predict_data = np.transpose(predict_data, [2, 0, 1]) # (240,50,5) + +train_label = np.reshape(train_label, [filter_num, -1]) # (5,240) +train_label = np.transpose(train_label, [1, 0]) # (240,5) +# + + +# train_label=np.ravel(train_label) +print(train_data.shape) +print(train_label.shape) +print(predict_data.shape) + + +# LSTM_realize(input=train_data, filters=1200).getLayer(layer='LSTM') + + +def predict_model(): + input = tf.keras.Input(shape=[dims, filter_num]) + input = tf.cast(input, tf.float32) + print(input.shape) + # LSTM=tf.keras.layers.LSTM(filter_num)(input) + LSTM = LSTM_realize(input=input, filters=filter_num, batch_size=batch_size).getLayer(layer='SA_convLSTM', + query=train_label) + d1 = tf.keras.layers.Dense(1000, activation='relu')(LSTM) + # drop = tf.keras.layers.Dropout(0.2)(LSTM) + # bn = tf.keras.layers.BatchNormalization()(Lstm) + output = tf.keras.layers.Dense(filter_num, name='output')(d1) + model = tf.keras.Model(inputs=input, outputs=output) + return model + + +# +if __name__ == '__main__': + + model = predict_model() + model.compile(optimizer=tf.optimizers.Adam(0.01), loss=tf.losses.mse, metrics=['acc']) + model.summary() + history = model.fit(train_data, train_label, validation_data=(train_data, train_label), epochs=200, + batch_size=batch_size) + # model.save("LSTM_model.h5") + # model = tf.keras.models.load_model("LSTM_model.h5") + # + # fig3 = plt.figure() + # ax3 = fig3.add_subplot() + # plt.plot(history.epoch, history.history.get('acc'), label='acc') + # plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc') + # 
plt.show() + # + # fig4 = plt.figure() + # ax4 = fig3.add_subplot() + # plt.plot(history.epoch, history.history.get('loss'), label='loss') + # plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss') + # plt.show() + + # predict_data.shape=(240,50,5) + # 连续预测五十个点并画出来 + predict_num = 50 + each_predict_data = predict_data[:, 1:, :] # (240,49,5) + all_data = predict_data # (240,50,5) + for each_predict in range(predict_num): + trained_data = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer('output').output).predict( + each_predict_data, batch_size=batch_size) # (240,5) + trained_data = tf.expand_dims(trained_data, axis=1) + + each_predict_data = tf.concat([each_predict_data[:, 1:, :], trained_data], axis=1) + all_data = tf.concat([all_data, trained_data], axis=1) + + # 获取到的所有data进行画图 + print(all_data.shape) # (240,100,5) + epoch, dims, filter_num = all_data.shape + all_data = tf.transpose(all_data, [1, 2, 0]) + all_data = tf.reshape(all_data, shape=[dims, filter_num * epoch]) # (100,240*5) + flatten_all_data = np.empty(shape=[dims + filter_num * epoch, ]) + (all_dims,) = flatten_all_data.shape + + # 将所有数据展平为一维进行画图 + for each in range(dims): + if each == 0: + flatten_all_data[:filter_num * epoch] = all_data[each, :] + else: + flatten_all_data[filter_num * epoch + each] = all_data[each, -1] + + print(flatten_all_data.shape) # (1300,) + plt.plot(flatten_all_data) + (all_dims,) = flatten_all_data.shape + all_x = np.arange(all_dims) + print(all_x.shape) + + # 获取到的所有data进行画图 + print(predict_data.shape) # (240,100,5) + epoch, dims, filter_num = predict_data.shape + predict_data = tf.transpose(predict_data, [1, 2, 0]) + all_data = tf.reshape(predict_data, shape=[dims, filter_num * epoch]) # (100,240*5) + before_data = np.empty(shape=[dims + filter_num * epoch, ]) + # 将所有数据展平为一维进行画图 + for each in range(dims): + if each == 0: + before_data[:filter_num * epoch] = all_data[each, :] + else: + before_data[filter_num * epoch + each] = 
all_data[each, -1] + + print(before_data.shape) # (1300,) + print(before_data) + (before_dims,) = before_data.shape + plt.plot(before_data) + plt.show() + + before_x = np.arange(before_dims) + all_x = np.arange(all_dims) + + plt.plot(before_x, before_data) + plt.scatter(before_dims, before_data) + plt.scatter(all_x, flatten_all_data) + plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM1.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM1.py new file mode 100644 index 0000000..fbaf693 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM1.py @@ -0,0 +1,215 @@ +import tensorflow as tf +import numpy as np +from model.LSTM.before.LSTM_realize_self3 import LSTM_realize +from keras.callbacks import EarlyStopping + +'''说明:将输入LSTM的维度理解为多少维度个点,LSTM的一个cell所做的事就是根据这dim个点得出dim+1个点的信息''' + +# 超参数设置 +filter_num = 30 # 时间部 +dims = 50 # 表示一个点的维度 +batch_size = 10 +EPOCH = 2 +model_name = 'SA_ConvLSTM' +predict_num = 50 +save_name = "../model/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}.h5".format(model_name, filter_num, dims, + batch_size, EPOCH) +predict_name = "../data/predict_data/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}_predict{5}.npy".format(model_name, + filter_num, + dims, + batch_size, + EPOCH, + predict_num) + +# 数据读入 +HI_merge_data = np.load("../HI_create/HI_merge_data.npy") + +# plt.plot(HI_merge_data[0:1250, 1]) +# 去除掉退化特征不明显前面的点 +HI_merge_data = HI_merge_data[0:1201, 1] +print(HI_merge_data) +print(HI_merge_data.shape) +# plt.plot(HI_merge_data) +# plt.show() +(total_dims,) = HI_merge_data.shape +# # 将其分成重叠采样状态-滑动窗口函数 +# # train_data =np.lib.stride_stricks.sliding_window_view(HI_merge_data, 3) +# # train_data =np.lib.stride_stricks.as_strided(HI_merge_data,(1240,10),(10,)) +# +train_data = np.empty(shape=[total_dims - filter_num - 1, filter_num]) + +predict_data = np.empty(shape=[total_dims - filter_num, filter_num]) +z = 0 +# 重叠采样获取时间部和训练次数 +for dim in range(total_dims): + + if dim + 
filter_num >= total_dims: + break + predict_data[dim] = HI_merge_data[dim:dim + filter_num] + + if dim + filter_num < total_dims - 1: + train_data[dim] = HI_merge_data[dim:dim + filter_num] + +a, _ = train_data.shape +train_label = predict_data[dims + 1:, :] +b, _ = predict_data.shape + +# 再重叠采样获取一个点的维度 +'''train_data.shape:(sample,filter_num)''' +resample_train_data = train_data[:a - dims, :] +resample_train_data = np.expand_dims(resample_train_data, axis=0) + +resample_predict_data = predict_data[:b - dims, :] +resample_predict_data = np.expand_dims(resample_predict_data, axis=0) + +for dim in range(dims - 1): + resample_train_data2 = train_data[(dim + 1):(a - dims + dim + 1), :] + resample_train_data2 = np.expand_dims(resample_train_data2, axis=0) + resample_train_data = tf.concat([resample_train_data, resample_train_data2], axis=0) + + resample_predict_data2 = predict_data[(dim + 1):(b - dims + dim + 1), :] + resample_predict_data2 = np.expand_dims(resample_predict_data2, axis=0) + resample_predict_data = tf.concat([resample_predict_data, resample_predict_data2], axis=0) + +resample_train_data = np.transpose(resample_train_data, [1, 2, 0]) +train_data = resample_train_data + +resample_predict_data = np.transpose(resample_predict_data, [1, 2, 0]) +predict_data = resample_predict_data + +print(train_data.shape) # (649,600) +print(train_data) +print(predict_data.shape) # (650,600) +print(predict_data) +print(train_label.shape) # (649,600) +print(train_label) +# # # # +# # np.save("../data/trian_data/train_data.npy", train_data) +# # np.save("../data/trian_data/train_label.npy", train_label) +# # np.save("../data/trian_data/predict_data.npy",predict_data) +# # np.save("../data/trian_data/total_data.npy",HI_merge_data) +# # # train_label= +# +# +# 处理好的train_data的读取 +''' +train_data.shape: (total_dims - filter_num - 1, filter_num,dims) :(570,600,30) +predict_data.shape: (total_dims - filter_num, filter_num) :(571,600,30) +train_label.shape: (total_dims - filter_num - 
1, filter_num) :(570,600) +''' + + +def remove(train_data, train_label, batch_size): + epoch, _, _ = train_data.shape + size = int(epoch / batch_size) + return train_data[:size * batch_size], train_label[:size * batch_size] + + +# # 数据处理 - 分成一个epoch训练240次 +train_data, train_label = remove(train_data, train_label, batch_size) # 去除直至能整除 +# train_data = np.expand_dims(train_data, axis=-1) # (649,600,1) +# predict_data = np.expand_dims(predict_data, axis=-1) # (650,600,1) +train_label = train_label +query_label = np.expand_dims(train_label, axis=-1) # (649,600,1) +total_data = HI_merge_data + +train_data = tf.cast(train_data, dtype=tf.float32) +predict_data = tf.cast(predict_data, dtype=tf.float32) +train_label = tf.cast(train_label, dtype=tf.float32) +# query_label = tf.cast(query_label, dtype=tf.float32) +# todo 解决模型保存时,query无法序列化的问题 +query_label = np.array(query_label,dtype=np.float32) + +print("predict_data.shape:", predict_data.shape) # (649,600,1) +print("train_data.shape:", train_data.shape) # (649,600,1) +print("train_label.shape:", train_label.shape) # (650,600,1) +print("query_label.shape:", query_label.shape) + + +def predict_model(): + input = tf.keras.Input(shape=[filter_num, dims]) + input = tf.cast(input, tf.float32) + LSTM_object = LSTM_realize(units=50, batch_size=batch_size, if_SA=True, if_Conv=True,query=query_label) + # LSTM_object = LSTM_realize(units=50, batch_size=batch_size, if_SA=False, if_Conv=False) + LSTM = LSTM_object(inputs=input, layer='SA_ConvLSTM') + # LSTM = LSTM_object(inputs=input, layer='LSTM') + drop = tf.keras.layers.Dropout(0.2)(LSTM) + bn = tf.keras.layers.BatchNormalization()(drop) + d1 = tf.keras.layers.Dense(32)(bn) + drop = tf.keras.layers.Dropout(0.2)(d1) + output = tf.keras.layers.Dense(1, name='output')(drop) + model = tf.keras.Model(inputs=input, outputs=output) + return model + + +# +if __name__ == '__main__': + model = predict_model() + model.compile(optimizer=tf.optimizers.Adam(0.01), loss=tf.losses.mse) + 
model.summary() + early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=8, mode='min', verbose=1) + history = model.fit(train_data, train_label, epochs=EPOCH, + batch_size=batch_size, verbose=1) + model.save(save_name) + # LSTM_object = LSTM_realize(units=50, batch_size=batch_size, if_SA=True, if_Conv=True) + newModel = tf.keras.models.load_model(save_name, custom_objects={'LSTM_realize': LSTM_realize}) + + # ''' + # train_data.shape:(570,600,30) + # predict_data.shape:(571,600,30) + # train_label.shape:(570,600) + # query_label.shape:(570,600,1) + # ''' + # # 连续预测五十个点并画出来 + # predict_num = 50 + # (samples, filter_num, dims) = predict_data.shape + # each_predict_data = predict_data[samples - batch_size:, :, :] # (5,filter_num,30) + # all_data = total_data # (1201,) + # for each_predict in range(predict_num): + # trained_data = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer('output').output).predict( + # each_predict_data, batch_size=batch_size) # (batch_size,filer_num,1) + # + # all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + # trained_data = tf.concat([each_predict_data[-1, :, 1:], trained_data[-1, :, :]], axis=-1) + # # trained_data=tf.reshape(trained_data,[batch_size,filter_num,1]) + # # trained_data = tf.expand_dims(trained_data, axis=0) + # + # each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(trained_data, axis=0)], axis=0) + # + # # 获取到的所有data进行画图 + # + # print(all_data.shape) # (700,600,1) + # (all_dims,) = all_data.shape + # all_x = np.arange(all_dims) + # print(all_x.shape) + # np.save(predict_name,all_data) + # + # before_data = total_data + # (before_dims,) = before_data.shape + # before_x = np.arange(before_dims) + # all_x = np.arange(all_dims) + # + # fig5 = plt.figure() + # ax5 = fig5.add_subplot() + # plt.plot(all_data) + # plt.plot(before_data) + # plt.show() + # + # print("before_data.shape:", before_data.shape) + # 
print("flatten_all_data.shape:", all_data.shape) + # print("before_x.shape:", before_x.shape) + # print("all_x.shape:", all_x.shape) + # + # fig6 = plt.figure() + # ax6 = fig6.add_subplot() + # plt.plot(before_x, before_data) + # plt.scatter(before_x, before_data) + # plt.scatter(all_x, all_data) + # plt.show() + # + # + # fig4 = plt.figure() + # ax4 = fig4.add_subplot() + # plt.plot(history.epoch, history.history.get('loss'), label='loss') + # # plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss') + # plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM2.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM2.py new file mode 100644 index 0000000..0dd52dd --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM2.py @@ -0,0 +1,204 @@ +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt +from model.LSTM.before.LSTM_realize_self2 import LSTM_realize + +'''说明:把所有点都当做维度为1输入进LSTM''' + +# 超参数的设置 +filter_num = 100 # 时间部 +dims = 1 # 表示一个点的维度 +batch_size = 7 +EPOCH = 200 +model_name = 'SA_ConvLSTM' +save_name = "{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}.h5".format(model_name, filter_num, dims, batch_size, EPOCH) + +# 数据读入 +HI_merge_data = np.load("../HI_create/HI_merge_data.npy") +# 去除掉退化特征不明显前面的点 +HI_merge_data = HI_merge_data[0:1250, 1] +print(HI_merge_data) +print(HI_merge_data.shape) +# plt.plot(HI_merge_data) +# plt.show() +(total_dims,) = HI_merge_data.shape +# 将其分成重叠采样状态-滑动窗口函数 +# train_data =np.lib.stride_stricks.sliding_window_view(HI_merge_data, 3) +# train_data =np.lib.stride_stricks.as_strided(HI_merge_data,(1240,10),(10,)) + +train_data = np.empty(shape=[total_dims - filter_num - 1, filter_num]) + +predict_data = np.empty(shape=[total_dims - filter_num, filter_num]) +z = 0 +for dim in range(total_dims): + + if dim + filter_num >= total_dims: + break + predict_data[dim] = HI_merge_data[dim:dim + filter_num] + + if dim + filter_num < total_dims - 
1: + train_data[dim] = HI_merge_data[dim:dim + filter_num] + +train_label = predict_data[1:, :] + +print(train_data.shape) # (649,600) +print(train_data) +print(predict_data.shape) # (650,600) +print(predict_data) +print(train_label.shape) # (649,600) +print(train_label) +# # +# np.save("../data/trian_data/train_data.npy", train_data) +# np.save("../data/trian_data/train_label.npy", train_label) +# np.save("../data/trian_data/predict_data.npy",predict_data) +# train_label= + +# 让LSTM看50个信息(NONE,50,1) 每一个信息就是一个点,这个点只有一个维度,如果想使用多个维度可以考虑前面使用CNN,增加其channel的数量 + +# 处理好的train_data的读取 +''' +train_data.shape: (total_dims - filter_num - 1, filter_num) :(649,600) +predict_data.shape: (total_dims - filter_num, filter_num) :(650,600) +train_label.shape: (total_dims - filter_num - 1, filter_num) :(649,600) +''' + + +def remove(train_data, train_label, batch_size): + epoch, _ = train_data.shape + size = int(epoch / batch_size) + return train_data[:size * batch_size], train_label[:size * batch_size] + + +# # 数据处理 - 分成一个epoch训练240次 +train_data, train_label = remove(train_data, train_label, batch_size) # 去除直至能整除 +train_data = np.expand_dims(train_data, axis=-1) # (649,600,1) +predict_data = np.expand_dims(predict_data, axis=-1) # (650,600,1) +train_label = train_label +query_label = np.expand_dims(train_label, axis=-1) # (649,600,1) +total_data = HI_merge_data + +train_data = tf.cast(train_data, dtype=tf.float32) +predict_data = tf.cast(predict_data, dtype=tf.float32) +train_label = tf.cast(train_label, dtype=tf.float32) +query_label = tf.cast(query_label, dtype=tf.float32) + +print(train_data.shape) # (649,600,1) +print(train_label.shape) # (650,600,1) +print(query_label.shape) +print(predict_data.shape) # (649,600,1) + + +def predict_model(): + input = tf.keras.Input(shape=[filter_num, dims]) + input = tf.cast(input, tf.float32) + LSTM = LSTM_realize(input=input, units=256, batch_size=batch_size, if_SA=True).getLayer(layer='SA_ConvLSTM', + query=query_label) + + drop = 
tf.keras.layers.Dropout(0.2)(LSTM) + bn = tf.keras.layers.BatchNormalization()(drop) + d1 = tf.keras.layers.Dense(32)(bn) + drop = tf.keras.layers.Dropout(0.2)(d1) + output = tf.keras.layers.Dense(dims, name='output')(drop) + model = tf.keras.Model(inputs=input, outputs=output) + return model + + +# +if __name__ == '__main__': + # model = predict_model() + # model.compile(optimizer=tf.optimizers.Adam(0.01), loss=tf.losses.mse) + # model.summary() + # early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=8, mode='min', verbose=1) + # history = model.fit(train_data, train_label, epochs=EPOCH, + # batch_size=batch_size) + # model.save(save_name) + model = tf.keras.models.load_model(save_name) + + # fig3 = plt.figure() + # ax3 = fig3.add_subplot() + # plt.plot(history.epoch, history.history.get('acc'), label='acc') + # plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc') + # plt.show() + # + # fig4 = plt.figure() + # ax4 = fig3.add_subplot() + # plt.plot(history.epoch, history.history.get('loss'), label='loss') + # plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss') + # plt.show() + + ''' + train_data.shape:(649,600,1) + predict_data.shape:(650,600,1) + train_label.shape:(649,600,1) + ''' + # 连续预测五十个点并画出来 + predict_num = 50 + (samples, filter_num, dims) = predict_data.shape + each_predict_data = predict_data[samples - batch_size:, :, :] # (6,filter_num,1) + all_data = predict_data # (samples,filter_num,1) + for each_predict in range(predict_num): + trained_data = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer('output').output).predict( + each_predict_data, batch_size=batch_size) # (batch_size,filer_num,1) + # trained_data=tf.reshape(trained_data,[batch_size,filter_num,1]) + # trained_data = tf.expand_dims(trained_data, axis=0) + + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(trained_data[-1, :, :], axis=0)], + axis=0) + all_data = tf.concat([all_data, 
tf.expand_dims(trained_data[-1, :, :], axis=0)], axis=0) + + # 获取到的所有data进行画图 + print(all_data.shape) # (700,600,1) + samples, filter_num, dims = all_data.shape + # all_data = tf.transpose(all_data, [1, 2, 0]) + all_data = tf.reshape(all_data, shape=[samples, filter_num * dims]) # (100,240*5) + flatten_all_data = np.zeros(shape=[samples + filter_num * dims, ]) + (all_dims,) = flatten_all_data.shape + + # 将所有数据展平为一维进行画图 + for each in range(samples): + if each == 0: + flatten_all_data[:filter_num * dims] = all_data[each, :] + else: + flatten_all_data[filter_num * dims + each] = all_data[each, -1] + + print(flatten_all_data.shape) # (1300,) + + (all_dims,) = flatten_all_data.shape + all_x = np.arange(all_dims) + print(all_x.shape) + # + # # 获取到的所有data进行画图 + # print(predict_data.shape) # (240,100,5) + # epoch, dims, filter_num = predict_data.shape + # predict_data = tf.transpose(predict_data, [1, 2, 0]) + # all_data = tf.reshape(predict_data, shape=[dims, filter_num * epoch]) # (100,240*5) + # before_data = np.empty(shape=[dims + filter_num * epoch, ]) + # # 将所有数据展平为一维进行画图 + # for each in range(dims): + # if each == 0: + # before_data[:filter_num * epoch] = all_data[each, :] + # else: + # before_data[filter_num * epoch + each] = all_data[each, -1] + # + # print(before_data.shape) # (1300,) + # print(before_data) + # (before_dims,) = before_data.shape + + # + before_data = total_data + (before_dims,) = before_data.shape + before_x = np.arange(before_dims) + all_x = np.arange(all_dims) + + plt.plot(flatten_all_data) + plt.plot(before_data) + plt.show() + + print("before_data.shape:",before_data.shape) + print("flatten_all_data.shape:",flatten_all_data.shape) + print("before_x.shape:",before_x.shape) + print("all_x.shape:",all_x.shape) + plt.plot(before_x, before_data) + plt.scatter(before_x, before_data) + plt.scatter(all_x, flatten_all_data) + plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM4.py 
b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM4.py new file mode 100644 index 0000000..05d64d8 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM4.py @@ -0,0 +1,481 @@ +import tensorflow as tf +import numpy as np +import matplotlib.pyplot as plt +from model.LSTM.before.LSTM_realize_self4 import LSTM_realize, PredictModel +import os +import shutil + +# TODO 使用函数式编程的方式书写model.fit对应于self4 +'''说明:将输入LSTM的维度理解为多少维度个点,LSTM的一个cell所做的事就是根据这dim个点得出dim+1个点的信息''' + +# 超参数设置 +filter_num = 500 # 时间部 +dims = 50 # 表示一个点的维度 +unit = 20 +batch_size = 10 +EPOCH = 100 +model_name = 'SA_ConvLSTM' +predict_num = 50 + +save_name = "../model/weight/{0}_unit{1}_FilterNum{2}_Dims{3}_Epoch{4}_weight/weight".format(model_name, unit, + filter_num, dims, + EPOCH) +save_loss_name = "../model/loss/{0}_unit{1}_FilterNum{2}_Dims{3}_Epoch{4}_loss.npy".format(model_name, unit, filter_num, + dims, + EPOCH) + +# save_name = "../model/weight/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}_weight".format(model_name, +# filter_num, dims, +# batch_size, EPOCH) +# save_loss_name = "../model/loss/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}_loss.npy".format(model_name, unit, +# filter_num, +# dims, +# batch_size, EPOCH) + +predict_name = "../data/predict_data/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}_predict{5}.npy".format(model_name, + filter_num, + dims, + batch_size, + EPOCH, + predict_num) + +# 数据读入 +HI_merge_data_origin = np.load("../HI_create/HI_merge_data.npy") + +# plt.plot(HI_merge_data[0:1250, 1]) +# 去除掉退化特征不明显前面的点 +HI_merge_data = HI_merge_data_origin[0:1201, 1] +print(HI_merge_data) +print(HI_merge_data.shape) +# plt.plot(HI_merge_data) +# plt.show() +(total_dims,) = HI_merge_data.shape +# # 将其分成重叠采样状态-滑动窗口函数 +# # train_data =np.lib.stride_stricks.sliding_window_view(HI_merge_data, 3) +# # train_data =np.lib.stride_stricks.as_strided(HI_merge_data,(1240,10),(10,)) +# +train_data = np.empty(shape=[total_dims - filter_num - 1, filter_num]) + 
+predict_data = np.empty(shape=[total_dims - filter_num, filter_num]) +z = 0 +# 重叠采样获取时间部和训练次数 +for dim in range(total_dims): + + if dim + filter_num >= total_dims: + break + predict_data[dim] = HI_merge_data[dim:dim + filter_num] + + if dim + filter_num < total_dims - 1: + train_data[dim] = HI_merge_data[dim:dim + filter_num] + +a, _ = train_data.shape +train_label = predict_data[dims + 1:, :] +b, _ = predict_data.shape + +# 再重叠采样获取一个点的维度 +'''train_data.shape:(sample,filter_num)''' +resample_train_data = train_data[:a - dims, :] +resample_train_data = np.expand_dims(resample_train_data, axis=0) + +resample_predict_data = predict_data[:b - dims, :] +resample_predict_data = np.expand_dims(resample_predict_data, axis=0) + +for dim in range(dims - 1): + resample_train_data2 = train_data[(dim + 1):(a - dims + dim + 1), :] + resample_train_data2 = np.expand_dims(resample_train_data2, axis=0) + resample_train_data = tf.concat([resample_train_data, resample_train_data2], axis=0) + + resample_predict_data2 = predict_data[(dim + 1):(b - dims + dim + 1), :] + resample_predict_data2 = np.expand_dims(resample_predict_data2, axis=0) + resample_predict_data = tf.concat([resample_predict_data, resample_predict_data2], axis=0) + +resample_train_data = np.transpose(resample_train_data, [1, 2, 0]) +train_data = resample_train_data + +resample_predict_data = np.transpose(resample_predict_data, [1, 2, 0]) +predict_data = resample_predict_data + +print(train_data.shape) # (649,600) +print(train_data) +print(predict_data.shape) # (650,600) +print(predict_data) +print(train_label.shape) # (649,600) +print(train_label) +# # # # +# # np.save("../data/trian_data/train_data.npy", train_data) +# # np.save("../data/trian_data/train_label.npy", train_label) +# # np.save("../data/trian_data/predict_data.npy",predict_data) +# # np.save("../data/trian_data/total_data.npy",HI_merge_data) +# # # train_label= +# +# +# 处理好的train_data的读取 +''' +train_data.shape: (total_dims - filter_num - 1, 
filter_num,dims) :(570,600,30) +predict_data.shape: (total_dims - filter_num, filter_num) :(571,600,30) +train_label.shape: (total_dims - filter_num - 1, filter_num) :(570,600) +''' + + +def remove(train_data, train_label, batch_size): + epoch, _, _ = train_data.shape + size = int(epoch / batch_size) + return train_data[:size * batch_size], train_label[:size * batch_size] + + +# # 数据处理 - 分成一个epoch训练240次 +train_data, train_label = remove(train_data, train_label, batch_size) # 去除直至能整除 +# train_data = np.expand_dims(train_data, axis=-1) # (649,600,1) +# predict_data = np.expand_dims(predict_data, axis=-1) # (650,600,1) +train_label = train_label +query_label = np.expand_dims(train_label, axis=-1) # (649,600,1) +total_data = HI_merge_data + +train_data = tf.cast(train_data, dtype=tf.float32) +predict_data = tf.cast(predict_data, dtype=tf.float32) +train_label = tf.cast(train_label, dtype=tf.float32) +# query_label = tf.cast(query_label, dtype=tf.float32) +# todo 解决模型保存时,query无法序列化的问题 +query_label = np.array(query_label, dtype=np.float32) + +print("predict_data.shape:", predict_data.shape) # (21, 1200, 30) +print("train_data.shape:", train_data.shape) # (20, 1200, 30) +print("train_label.shape:", train_label.shape) # (20, 1200) +print("query_label.shape:", query_label.shape) # (20, 1200, 1) + + +def predict_model(): + input = tf.keras.Input(shape=[filter_num, dims]) + input = tf.cast(input, tf.float32) + LSTM_object = LSTM_realize(units=50, batch_size=batch_size, if_SA=True, if_Conv=True, query=query_label) + # LSTM_object = LSTM_realize(units=50, batch_size=batch_size, if_SA=False, if_Conv=False) + LSTM = LSTM_object(inputs=input, layer='SA_ConvLSTM') + # LSTM = LSTM_object(inputs=input, layer='LSTM') + drop = tf.keras.layers.Dropout(0.2)(LSTM) + bn = tf.keras.layers.BatchNormalization()(drop) + d1 = tf.keras.layers.Dense(32)(bn) + drop = tf.keras.layers.Dropout(0.2)(d1) + output = tf.keras.layers.Dense(1, name='output')(drop) + model = tf.keras.Model(inputs=input, 
outputs=output) + return model + + +# 仅使用预测出来的最新的一个点预测以后 +def predictOneByOne(predict_data, predict_num=30): + (samples, filter_num, dims) = predict_data.shape # predict_data.shape: (21, 1200, 30) + each_predict_data = predict_data[samples - batch_size:, :, :] # (5,filter_num,30) + all_data = total_data # (1201,) + for each_predict in range(predict_num): + trained_data = newModel.predict(each_predict_data, batch_size=batch_size) # (batch_size,filer_num,1) + + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + temp1 = tf.concat([each_predict_data[-1, -1, 1:], tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + temp2 = tf.concat([each_predict_data[-1, 1:, :], tf.expand_dims(temp1, axis=0)], axis=0) + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(temp2, axis=0)], axis=0) + + return all_data + + +# 使用最后预测出来的一整行与之前的拼接 +def predictContinued(predict_data, predict_num=30): + # predict_num = 30 + (samples, filter_num, dims) = predict_data.shape # predict_data.shape: (21, 1200, 30) + each_predict_data = predict_data[samples - batch_size:, :, :] # (5,filter_num,30) + all_data = total_data # (1201,) + for each_predict in range(predict_num): + trained_data = newModel.predict( + each_predict_data, batch_size=batch_size) # (batch_size,filer_num,1) + + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + trained_data = tf.concat([each_predict_data[-1, :, 1:], trained_data[-1, :, :]], axis=-1) + # trained_data=tf.reshape(trained_data,[batch_size,filter_num,1]) + # trained_data = tf.expand_dims(trained_data, axis=0) + + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(trained_data, axis=0)], axis=0) + return all_data + + +# 使用最后预测出来的一整行与之前的拼接,预测函数使用自己的call函数实现,否则传统的两个方法的query就是一直使用的以前的,而且z一直是0 +def selfPredictContinued(predict_data, query_label, predict_num=30): + # predict_num = 30 + (samples, filter_num, dims) = predict_data.shape # 
predict_data.shape: (21, 1200, 30) + (samples1, filter_num1, dims1) = query_label.shape + # 只留下最后一个batch + each_predict_data = predict_data[samples - batch_size:, :, :] # (10,1200,30) + each_query_data = query_label[samples1 - batch_size:, :, :] # (10, 1200, 1) + # 当想要的下一个点的数据没有的时候,使用最后一个点当做下一个点的query + temp1 = each_query_data[-1, 1:, :] + temp2 = each_query_data[-1, -1, -1] + temp22 = tf.expand_dims(tf.expand_dims(temp2, axis=-1), axis=-1) + each_query_data = tf.concat([each_query_data[1:, :, :], tf.expand_dims(tf.concat([temp1, temp22], axis=0), axis=0)], + axis=0) + print(each_query_data.shape) # (10,1200,1) + + # 尝试一次预测 + each_query_data = each_query_data[-1, :, :] + each_predict_data = each_predict_data[-1, :, :] + each_predict_data = tf.expand_dims(each_predict_data, axis=0) + each_query_data = tf.expand_dims(each_query_data, axis=0) + + all_data = total_data # (1201,) + i = 1 # 用于看第几次预测了 + for each_predict in range(predict_num): + trained_data = newModel.call(each_predict_data, query=each_query_data, batch_size=1) # (batch_size,filer_num,1) + + print("第", i, "次预测已完成") + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + one_data = tf.concat([each_predict_data[-1, :, 1:], trained_data[-1, :, :]], axis=-1) + # 这里的1:-1是为了去掉上一次重复的那个 + temp1 = each_query_data[-1, 1:-1, :] + temp2 = tf.expand_dims(tf.expand_dims(trained_data[-1, -1, -1], axis=-1), axis=-1) + # 拼接两次上面的值,其中最后一次为与上一次类似的预测点 + temp3 = tf.concat([tf.concat([temp1, temp2], axis=0), temp2], axis=0) + each_query_data = tf.concat( + [each_query_data[1:, :, :], tf.expand_dims(temp3, axis=0)], axis=0) + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(one_data, axis=0)], axis=0) + i += 1 + return all_data + + +# 使用最后预测出来的最后一个与之前的拼接,预测函数使用自己的call函数实现,否则传统的两个方法的query就是一直使用的以前的,而且z一直是0 +def selfPredictOneByOne(predict_data, query_label, predict_num=30): + # predict_num = 30 + (samples, filter_num, dims) = predict_data.shape # 
predict_data.shape: (21, 1200, 30) + (samples1, filter_num1, dims1) = query_label.shape + # 只留下最后一个batch + each_predict_data = predict_data[samples - batch_size:, :, :] # (10,1200,30) + each_query_data = query_label[samples1 - batch_size:, :, :] # (10, 1200, 1) + # 当想要的下一个点的数据没有的时候,使用最后一个点当做下一个点的query + temp1 = each_query_data[-1, 1:, :] + temp2 = each_query_data[-1, -1, -1] + temp22 = tf.expand_dims(tf.expand_dims(temp2, axis=-1), axis=-1) + each_query_data = tf.concat([each_query_data[1:, :, :], tf.expand_dims(tf.concat([temp1, temp22], axis=0), axis=0)], + axis=0) + print(each_query_data.shape) # (10,1200,1) + + # 尝试一次预测 + each_query_data = each_query_data[-1, :, :] + each_predict_data = each_predict_data[-1, :, :] + each_predict_data = tf.expand_dims(each_predict_data, axis=0) + each_query_data = tf.expand_dims(each_query_data, axis=0) + + all_data = total_data # (1201,) + i = 1 # 用于看第几次预测了 + for each_predict in range(predict_num): + trained_data = newModel.call(each_predict_data, query=each_query_data, batch_size=1) # (batch_size,filer_num,1) + + print("第", i, "次预测已完成") + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + + _temp1 = tf.concat([each_predict_data[-1, -1, 1:], tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + _temp2 = tf.concat([each_predict_data[-1, 1:, :], tf.expand_dims(_temp1, axis=0)], axis=0) + # one_data = tf.concat([each_predict_data[-1, :, 1:], temp1], axis=-1) + # 这里的1:-1是为了去掉上一次重复的那个 + temp1 = each_query_data[-1, 1:-1, :] + temp2 = tf.expand_dims(tf.expand_dims(trained_data[-1, -1, -1], axis=-1), axis=-1) + # 拼接两次上面的值,其中最后一次为与上一次类似的预测点 + temp3 = tf.concat([tf.concat([temp1, temp2], axis=0), temp2], axis=0) + each_query_data = tf.concat( + [each_query_data[1:, :, :], tf.expand_dims(temp3, axis=0)], axis=0) + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(_temp2, axis=0)], axis=0) + i += 1 + return all_data + + +def folderGenerate(folder_name): + if not 
os.path.exists(folder_name): + os.mkdir(folder_name) + + +# 递归删除文件夹 +def folderDelete(folder_name): + if os.path.exists(folder_name): + shutil.rmtree(folder_name) + + +# 判断这次是否进行模型保存,history_loss存储历史上的loss +def SaveBestModel(history_loss, loss_value): + weight_folder = save_name[:-7] + + # 如果history_loss为空,那么直接保存 + if len(history_loss) == 0: + folderGenerate(weight_folder) + model.save_weights(save_name) + return + + # 先判断要不要存模型,如果上一次的比这一次的loss要大,就保存这一次的 + if np.min(history_loss) > loss_value: + # 删除上一次的保存这一次的 + folderDelete(weight_folder) + folderGenerate(weight_folder) + model.save_weights(save_name) + return + + pass + + +def IsStopTraining(history_loss, patience=5): + if len(history_loss) <= patience: + return False + for i in range(1, patience): + if history_loss[-(patience + 1)] > history_loss[-i]: + return False + print(patience, "次loss未下降,训练停止") + return True + + +def shuffle(data, label): + label = tf.expand_dims(label, axis=-1) + total = tf.concat([data, label], axis=-1) + total = tf.random.shuffle(total) + data = total[:, :, :-1] + label = total[:, :, -1] + query = tf.expand_dims(label, axis=-1) + return data, label, query + + +def splitValData(data, label, val_radio=0.2): + size, filter_num, dims = data.shape + val_data = data[:int(size * val_radio), :, :] + train_data = data[int(size * val_radio):, :, :] + val_label = label[:int(size * val_radio), :] + train_label = label[int(size * val_radio):, :] + val_query = tf.expand_dims(val_label, axis=-1) + train_query = tf.expand_dims(train_label, axis=-1) + + return (train_data, train_label, train_query), (val_data, val_label, val_query) + + +def Is_Reduce_learning_rate(history_loss, patience=3): + if len(history_loss) <= patience: + return False + for i in range(patience): + if history_loss[-(patience + 1)] > history_loss[-i]: + return False + print(patience,"次loss未下降,降低学习率") + return True + + +# +if __name__ == '__main__': + # model = predict_model() + # # opt = tf.optimizers.Adam(1e-3) + model = 
PredictModel(filter_num=filter_num, dims=dims, batch_size=batch_size, query_label=query_label) + # + # # # # TODO 需要运行编译一次,才能打印model.summary() + # model.call(inputs=train_data[0:batch_size], label=train_label[0:batch_size]) + # model.build(input_shape=(batch_size, filter_num, dims)) + # model.summary() + + # + history_loss = [] + history_val_loss = [] + learning_rate = 1e-3 + for epoch in range(EPOCH): + train_data, train_label, query_label = shuffle(train_data, train_label) + if epoch == 0: + (train_data, train_label, query_label), (val_data, val_label, val_query) = splitValData(data=train_data, + label=train_label, + val_radio=0.2) + print() + print("EPOCH:", epoch, "/", EPOCH, ":") + # 用于让train知道,这是这个epoch中的第几次训练 + z = 0 + # 用于batch_size次再训练 + k = 1 + for data_1, label_1 in zip(train_data, train_label): + size, _, _ = train_data.shape + data_1 = tf.expand_dims(data_1, axis=0) + label_1 = tf.expand_dims(label_1, axis=0) + if batch_size != 1: + if k % batch_size == 1: + data = data_1 + label = label_1 + else: + data = tf.concat([data, data_1], axis=0) + label = tf.concat([label, label_1], axis=0) + else: + data = data_1 + label = label_1 + + if k % batch_size == 0: + label = tf.expand_dims(label, axis=-1) + loss_value = model.train(input_tensor=data, label=label, query=query_label, learning_rate=learning_rate, + z=z) + print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy()) + k = 0 + z = z + 1 + k = k + 1 + + val_loss = model.get_val_loss(val_data=val_data, val_label=val_label, val_query=val_query, batch_size=1) + SaveBestModel(history_loss=history_val_loss, loss_value=val_loss.numpy()) + history_val_loss.append(val_loss) + history_loss.append(loss_value.numpy()) + print('Training loss is :', loss_value.numpy()) + print('Validating loss is :', val_loss.numpy()) + if IsStopTraining(history_loss=history_val_loss, patience=7): + break + if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3): + learning_rate = 1e-4 + + # 
loss_folder = save_loss_name[:-4] + # folderGenerate(loss_folder) + # np.save(save_loss_name, history_loss) + + model.save_weights(save_name) + newModel = PredictModel(filter_num=filter_num, dims=dims, batch_size=batch_size, query_label=query_label) + # newModel.load_weights(save_name) + # # history_loss=np.load(save_loss_name) + # # print(history_loss) + # + # + # # # ''' + # # # train_data.shape:(570,600,30) + # # # predict_data.shape:(571,600,30) + # # # train_label.shape:(570,600) + # # # query_label.shape:(570,600,1) + # # # ''' + # # 连续预测五十个点并画出来 + predict_num = 50 + # all_data = predictContinued(predict_data=predict_data, predict_num=predict_num) + # all_data = predictOneByOne(predict_data=predict_data, predict_num=predict_num) + # all_data = selfPredictContinued(predict_data=predict_data, query_label=query_label, predict_num=predict_num) + all_data = selfPredictOneByOne(predict_data=predict_data, query_label=query_label, predict_num=predict_num) + + # 获取到的所有data进行画图 + print(all_data.shape) # (700,600,1) + (all_dims,) = all_data.shape + all_x = np.arange(all_dims) + print(all_x.shape) + # np.save(predict_name, all_data) + + before_data = total_data + (before_dims,) = before_data.shape + before_x = np.arange(before_dims) + all_x = np.arange(all_dims) + + fig5 = plt.figure() + ax5 = fig5.add_subplot(2, 1, 1) + ax5.plot(all_data) + ax5.plot(before_data) + ax51 = fig5.add_subplot(2, 1, 2) + ax51.plot(HI_merge_data_origin[0:all_dims, 1]) + plt.show() + + print("before_data.shape:", before_data.shape) + print("flatten_all_data.shape:", all_data.shape) + print("before_x.shape:", before_x.shape) + print("all_x.shape:", all_x.shape) + + fig6 = plt.figure() + ax6 = fig6.add_subplot() + plt.plot(before_x, before_data) + plt.scatter(before_x, before_data) + plt.scatter(all_x, all_data) + plt.show() + + # fig4 = plt.figure() + # ax4 = fig4.add_subplot() + # plt.plot(history.epoch, history.history.get('loss'), label='loss') + # # plt.plot(history.epoch, 
history.history.get('val_loss'), label='val_loss') + # plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM5.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM5.py new file mode 100644 index 0000000..a25dcba --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/LSTM5.py @@ -0,0 +1,592 @@ +import tensorflow as tf +import numpy as np +from model.LSTM.before.LSTM_realize_self5 import LSTM_realize, PredictModel +from keras.callbacks import EarlyStopping +import os +import shutil +from model.LossFunction.FTMSE import FTMSE + +# TODO 使用函数式编程的方式书写model.fit对应于self5,LSTM参数共享测试 +'''说明:将输入LSTM的维度理解为多少维度个点,LSTM的一个cell所做的事就是根据这dim个点得出dim+1个点的信息''' + + +# save_name = "../model/weight/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}_weight".format(model_name, +# filter_num, dims, +# batch_size, EPOCH) +# save_loss_name = "../model/loss/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}_loss.npy".format(model_name, unit, +# filter_num, +# dims, +# batch_size, EPOCH) + + +def getData(filter_num, dims, batch_size): + # 数据读入 + HI_merge_data_origin = np.load("../HI_create/HI_merge_data.npy") + + # plt.plot(HI_merge_data[0:1250, 1]) + # 去除掉退化特征不明显前面的点 + HI_merge_data = HI_merge_data_origin[0:1201, 1] + print(HI_merge_data) + print(HI_merge_data.shape) + # plt.plot(HI_merge_data) + # plt.show() + (total_dims,) = HI_merge_data.shape + # # 将其分成重叠采样状态-滑动窗口函数 + # # train_data =np.lib.stride_stricks.sliding_window_view(HI_merge_data, 3) + # # train_data =np.lib.stride_stricks.as_strided(HI_merge_data,(1240,10),(10,)) + # + train_data = np.empty(shape=[total_dims - filter_num - 1, filter_num]) + + predict_data = np.empty(shape=[total_dims - filter_num, filter_num]) + z = 0 + # 重叠采样获取时间部和训练次数 + for dim in range(total_dims): + + if dim + filter_num >= total_dims: + break + predict_data[dim] = HI_merge_data[dim:dim + filter_num] + + if dim + filter_num < total_dims - 1: + train_data[dim] = HI_merge_data[dim:dim + filter_num] + + a, _ 
= train_data.shape + train_label = predict_data[dims + 1:, :] + b, _ = predict_data.shape + + # 再重叠采样获取一个点的维度 + '''train_data.shape:(sample,filter_num)''' + resample_train_data = train_data[:a - dims, :] + resample_train_data = np.expand_dims(resample_train_data, axis=0) + + resample_predict_data = predict_data[:b - dims, :] + resample_predict_data = np.expand_dims(resample_predict_data, axis=0) + + for dim in range(dims - 1): + resample_train_data2 = train_data[(dim + 1):(a - dims + dim + 1), :] + resample_train_data2 = np.expand_dims(resample_train_data2, axis=0) + resample_train_data = tf.concat([resample_train_data, resample_train_data2], axis=0) + + resample_predict_data2 = predict_data[(dim + 1):(b - dims + dim + 1), :] + resample_predict_data2 = np.expand_dims(resample_predict_data2, axis=0) + resample_predict_data = tf.concat([resample_predict_data, resample_predict_data2], axis=0) + + resample_train_data = np.transpose(resample_train_data, [1, 2, 0]) + train_data = resample_train_data + + resample_predict_data = np.transpose(resample_predict_data, [1, 2, 0]) + predict_data = resample_predict_data + + print(train_data.shape) # (649,600) + print(train_data) + print(predict_data.shape) # (650,600) + print(predict_data) + print(train_label.shape) # (649,600) + print(train_label) + + # # 数据处理 - 分成一个epoch训练240次 + # train_data, train_label = remove(train_data, train_label, batch_size) # 去除直至能整除 + # train_data = np.expand_dims(train_data, axis=-1) # (649,600,1) + # predict_data = np.expand_dims(predict_data, axis=-1) # (650,600,1) + train_label = train_label + query_label = np.expand_dims(train_label, axis=-1) # (649,600,1) + total_data = HI_merge_data + + train_data = tf.cast(train_data, dtype=tf.float32) + predict_data = tf.cast(predict_data, dtype=tf.float32) + train_label = tf.cast(tf.expand_dims(train_label, axis=-1), dtype=tf.float32) + # query_label = tf.cast(query_label, dtype=tf.float32) + # todo 解决模型保存时,query无法序列化的问题 + query_label = np.array(query_label, 
dtype=np.float32) + + print("predict_data.shape:", predict_data.shape) # (21, 1200, 30) + print("train_data.shape:", train_data.shape) # (20, 1200, 30) + print("train_label.shape:", train_label.shape) # (20, 1200) + print("query_label.shape:", query_label.shape) # (20, 1200, 1) + return predict_data, train_data, train_label, query_label, total_data, HI_merge_data_origin + + +# # # # +# # np.save("../data/trian_data/train_data.npy", train_data) +# # np.save("../data/trian_data/train_label.npy", train_label) +# # np.save("../data/trian_data/predict_data.npy",predict_data) +# # np.save("../data/trian_data/total_data.npy",HI_merge_data) +# # # train_label= +# +# +# 处理好的train_data的读取 +''' +train_data.shape: (total_dims - filter_num - 1, filter_num,dims) :(570,600,30) +predict_data.shape: (total_dims - filter_num, filter_num) :(571,600,30) +train_label.shape: (total_dims - filter_num - 1, filter_num) :(570,600) +''' + + +def remove(train_data, train_label, batch_size): + epoch, _, _ = train_data.shape + size = int(epoch / batch_size) + return train_data[:size * batch_size], train_label[:size * batch_size] + + +# 仅使用预测出来的最新的一个点预测以后没有batch_size +def predictOneByOneWithBatch1(newModel, predict_data, predict_num=30): + (samples, filter_num, dims) = predict_data.shape # predict_data.shape: (21, 1200, 30) + each_predict_data = predict_data[-1, :, :] # (5,filter_num,30) + each_predict_data = tf.expand_dims(each_predict_data, axis=0) + all_data = total_data # (1201,) + i = 1 # 用于看第几次预测 + for each_predict in range(predict_num): + trained_data = newModel.predict(each_predict_data) # (batch_size,filer_num,1) + + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + temp1 = tf.concat([each_predict_data[-1, -1, 1:], tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + temp2 = tf.concat([each_predict_data[-1, 1:, :], tf.expand_dims(temp1, axis=0)], axis=0) + each_predict_data = tf.concat([each_predict_data[1:, :, :], 
tf.expand_dims(temp2, axis=0)], axis=0) + print("第", i, "次预测已经完成") + i += 1 + return all_data + + +# 仅使用预测出来的最新的一个点预测以后 +def predictOneByOne(newModel, predict_data, predict_num=30): + (samples, filter_num, dims) = predict_data.shape # predict_data.shape: (21, 1200, 30) + each_predict_data = predict_data[samples - batch_size:, :, :] # (5,filter_num,30) + all_data = total_data # (1201,) + for each_predict in range(predict_num): + trained_data = newModel.predict(each_predict_data, batch_size=batch_size) # (batch_size,filer_num,1) + + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + temp1 = tf.concat([each_predict_data[-1, -1, 1:], tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + temp2 = tf.concat([each_predict_data[-1, 1:, :], tf.expand_dims(temp1, axis=0)], axis=0) + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(temp2, axis=0)], axis=0) + + return all_data + + +# 使用最后预测出来的一整行与之前的拼接 +def predictContinued(newModel, predict_data, predict_num=30): + # predict_num = 30 + (samples, filter_num, dims) = predict_data.shape # predict_data.shape: (21, 1200, 30) + each_predict_data = predict_data[samples - batch_size:, :, :] # (5,filter_num,30) + all_data = total_data # (1201,) + for each_predict in range(predict_num): + trained_data = newModel.predict( + each_predict_data, batch_size=batch_size) # (batch_size,filer_num,1) + + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + trained_data = tf.concat([each_predict_data[-1, :, 1:], trained_data[-1, :, :]], axis=-1) + # trained_data=tf.reshape(trained_data,[batch_size,filter_num,1]) + # trained_data = tf.expand_dims(trained_data, axis=0) + + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(trained_data, axis=0)], axis=0) + return all_data + + +# 使用最后预测出来的一整行与之前的拼接,预测函数使用自己的call函数实现,否则传统的两个方法的query就是一直使用的以前的,而且z一直是0 +def selfPredictContinued(newModel, predict_data, query_label, 
predict_num=30): + # predict_num = 30 + (samples, filter_num, dims) = predict_data.shape # predict_data.shape: (21, 1200, 30) + (samples1, filter_num1, dims1) = query_label.shape + # 只留下最后一个batch + each_predict_data = predict_data[samples - batch_size:, :, :] # (10,1200,30) + each_query_data = query_label[samples1 - batch_size:, :, :] # (10, 1200, 1) + # 当想要的下一个点的数据没有的时候,使用最后一个点当做下一个点的query + temp1 = each_query_data[-1, 1:, :] + temp2 = each_query_data[-1, -1, -1] + temp22 = tf.expand_dims(tf.expand_dims(temp2, axis=-1), axis=-1) + each_query_data = tf.concat([each_query_data[1:, :, :], tf.expand_dims(tf.concat([temp1, temp22], axis=0), axis=0)], + axis=0) + print(each_query_data.shape) # (10,1200,1) + + # 尝试一次预测 + each_query_data = each_query_data[-1, :, :] + each_predict_data = each_predict_data[-1, :, :] + each_predict_data = tf.expand_dims(each_predict_data, axis=0) + each_query_data = tf.expand_dims(each_query_data, axis=0) + + all_data = total_data # (1201,) + i = 1 # 用于看第几次预测了 + for each_predict in range(predict_num): + trained_data = newModel.call(each_predict_data, query=each_query_data, batch_size=1) # (batch_size,filer_num,1) + + print("第", i, "次预测已完成") + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + one_data = tf.concat([each_predict_data[-1, :, 1:], trained_data[-1, :, :]], axis=-1) + # 这里的1:-1是为了去掉上一次重复的那个 + temp1 = each_query_data[-1, 1:-1, :] + temp2 = tf.expand_dims(tf.expand_dims(trained_data[-1, -1, -1], axis=-1), axis=-1) + # 拼接两次上面的值,其中最后一次为与上一次类似的预测点 + temp3 = tf.concat([tf.concat([temp1, temp2], axis=0), temp2], axis=0) + each_query_data = tf.concat( + [each_query_data[1:, :, :], tf.expand_dims(temp3, axis=0)], axis=0) + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(one_data, axis=0)], axis=0) + i += 1 + return all_data + + +# 使用最后预测出来的最后一个与之前的拼接,预测函数使用自己的call函数实现,否则传统的两个方法的query就是一直使用的以前的,而且z一直是0 +def selfPredictOneByOne(newModel, predict_data, query_label, 
predict_num=30): + # predict_num = 30 + (samples, filter_num, dims) = predict_data.shape # predict_data.shape: (21, 1200, 30) + (samples1, filter_num1, dims1) = query_label.shape + # 只留下最后一个batch + each_predict_data = predict_data[samples - batch_size:, :, :] # (10,1200,30) + each_query_data = query_label[samples1 - batch_size:, :, :] # (10, 1200, 1) + # 当想要的下一个点的数据没有的时候,使用最后一个点当做下一个点的query + temp1 = each_query_data[-1, 1:, :] + temp2 = each_query_data[-1, -1, -1] + temp22 = tf.expand_dims(tf.expand_dims(temp2, axis=-1), axis=-1) + each_query_data = tf.concat([each_query_data[1:, :, :], tf.expand_dims(tf.concat([temp1, temp22], axis=0), axis=0)], + axis=0) + print(each_query_data.shape) # (10,1200,1) + + # 尝试一次预测 + each_query_data = each_query_data[-1, :, :] + each_predict_data = each_predict_data[-1, :, :] + each_predict_data = tf.expand_dims(each_predict_data, axis=0) + each_query_data = tf.expand_dims(each_query_data, axis=0) + + all_data = total_data # (1201,) + i = 1 # 用于看第几次预测了 + for each_predict in range(predict_num): + trained_data = newModel.call(each_predict_data, query=each_query_data, batch_size=1) # (batch_size,filer_num,1) + + print("第", i, "次预测已完成") + all_data = tf.concat([all_data, tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + + _temp1 = tf.concat([each_predict_data[-1, -1, 1:], tf.expand_dims(trained_data[-1, -1, -1], axis=-1)], axis=0) + _temp2 = tf.concat([each_predict_data[-1, 1:, :], tf.expand_dims(_temp1, axis=0)], axis=0) + # one_data = tf.concat([each_predict_data[-1, :, 1:], temp1], axis=-1) + # 这里的1:-1是为了去掉上一次重复的那个 + temp1 = each_query_data[-1, 1:-1, :] + temp2 = tf.expand_dims(tf.expand_dims(trained_data[-1, -1, -1], axis=-1), axis=-1) + # 拼接两次上面的值,其中最后一次为与上一次类似的预测点 + temp3 = tf.concat([tf.concat([temp1, temp2], axis=0), temp2], axis=0) + each_query_data = tf.concat( + [each_query_data[1:, :, :], tf.expand_dims(temp3, axis=0)], axis=0) + each_predict_data = tf.concat([each_predict_data[1:, :, :], tf.expand_dims(_temp2, 
axis=0)], axis=0) + i += 1 + return all_data + + +def folderGenerate(folder_name): + if not os.path.exists(folder_name): + os.mkdir(folder_name) + + +# 递归删除文件夹 +def folderDelete(folder_name): + if os.path.exists(folder_name): + shutil.rmtree(folder_name) + + +# 判断这次是否进行模型保存,history_loss存储历史上的loss +def SaveBestModel(model, history_loss, loss_value): + weight_folder = save_name[:-7] + + # 如果history_loss为空,那么直接保存 + if len(history_loss) == 0: + folderGenerate(weight_folder) + model.save_weights(save_name) + return + + # 先判断要不要存模型,如果上一次的比这一次的loss要大,就保存这一次的 + if np.min(history_loss) > loss_value: + # 删除上一次的保存这一次的 + folderDelete(weight_folder) + folderGenerate(weight_folder) + model.save_weights(save_name) + print("保存这次模型") + return + + pass + + +def IsStopTraining(history_loss, patience=5): + if len(history_loss) <= patience: + return False + for i in range(1, patience): + if history_loss[-(patience + 1)] > history_loss[-i]: + return False + print(patience, "次loss未下降,训练停止") + return True + + +def shuffle(data, label): + label = tf.expand_dims(label, axis=-1) + total = tf.concat([data, label], axis=-1) + total = tf.random.shuffle(total) + data = total[:, :, :-1] + label = total[:, :, -1] + query = tf.expand_dims(label, axis=-1) + return data, label, query + + +def splitValData(data, label, val_radio=0.2): + size, filter_num, dims = data.shape + val_data = data[:int(size * val_radio), :, :] + train_data = data[int(size * val_radio):, :, :] + val_label = label[:int(size * val_radio), :] + train_label = label[int(size * val_radio):, :] + val_query = tf.expand_dims(val_label, axis=-1) + train_query = tf.expand_dims(train_label, axis=-1) + + return (train_data, train_label, train_query), (val_data, val_label, val_query) + + +def Is_Reduce_learning_rate(history_loss, patience=3): + if len(history_loss) <= patience: + return False + for i in range(patience): + if history_loss[-(patience + 1)] > history_loss[-i]: + return False + print(patience, "次loss未下降,降低学习率") + return True + 
+ +def predict_model(): + input = tf.keras.Input(shape=[filter_num, dims]) + input = tf.cast(input, tf.float32) + LSTM = LSTM_realize(units=10, batch_size=batch_size, if_SA=False, if_Conv=True, if_mutiHead=False, + num_heads=2)(inputs=input, layer='ConvLSTM') + + # LSTM = LSTM_object(inputs=input, layer='SA_ConvLSTM1') + # LSTM = tf.keras.layers.LSTM(units=20,return_sequences=True)(input) + # LSTM = tf.keras.layers.LSTM(units=10,return_sequences=True)(input) + bn = tf.keras.layers.BatchNormalization()(LSTM) + drop = tf.keras.layers.Dropout(0.2)(bn) + + # # LSTM_object = LSTM_realize(units=256, batch_size=batch_size, if_SA=False, if_Conv=False, if_mutiHead=False, + # # num_heads=2) + # # LSTM = LSTM_object(inputs=drop, layer='LSTM') + # LSTM=tf.keras.layers.LSTM(units=256,return_sequences=True)(drop) + # bn = tf.keras.layers.BatchNormalization()(LSTM) + # drop = tf.keras.layers.Dropout(0.2)(bn) + # + # + # # LSTM_object = LSTM_realize(units=128, batch_size=batch_size, if_SA=False, if_Conv=False, if_mutiHead=False, + # # num_heads=2) + # # LSTM = LSTM_object(inputs=drop, layer='LSTM') + # LSTM = tf.keras.layers.LSTM(units=128, return_sequences=True)(drop) + # bn = tf.keras.layers.BatchNormalization()(LSTM) + # drop = tf.keras.layers.Dropout(0.2)(bn) + # # LSTM = LSTM_object(inputs=input, layer='LSTM') + # d1 = tf.keras.layers.Dense(64)(drop) + # bn = tf.keras.layers.BatchNormalization()(d1) + # drop = tf.keras.layers.Dropout(0.2)(bn) + # + # d1 = tf.keras.layers.Dense(32)(drop) + # bn = tf.keras.layers.BatchNormalization()(d1) + # drop = tf.keras.layers.Dropout(0.2)(bn) + # + d1 = tf.keras.layers.Dense(5)(drop) + bn = tf.keras.layers.BatchNormalization()(d1) + + # drop = tf.keras.layers.Dropout(0.2)(bn) + output = tf.keras.layers.Dense(1, name='output')(bn) + model = tf.keras.Model(inputs=input, outputs=output) + return model + + +def self_train(): + model = PredictModel(batch_size=batch_size) + # # # # TODO 需要运行编译一次,才能打印model.summary() + # 
model.build(input_shape=(batch_size, filter_num, dims)) + # model.summary() + history_loss = [] + history_val_loss = [] + learning_rate = 1e-3 + for epoch in range(EPOCH): + train_data, train_label, query_label = shuffle(train_data, train_label) + if epoch == 0: + (train_data, train_label, query_label), (val_data, val_label, val_query) = splitValData(data=train_data, + label=train_label, + val_radio=0.2) + print() + print("EPOCH:", epoch, "/", EPOCH, ":") + # 用于让train知道,这是这个epoch中的第几次训练 + z = 0 + # 用于batch_size次再训练 + k = 1 + for data_1, label_1 in zip(train_data, train_label): + size, _, _ = train_data.shape + data_1 = tf.expand_dims(data_1, axis=0) + label_1 = tf.expand_dims(label_1, axis=0) + if batch_size != 1: + if k % batch_size == 1: + data = data_1 + label = label_1 + else: + data = tf.concat([data, data_1], axis=0) + label = tf.concat([label, label_1], axis=0) + else: + data = data_1 + label = label_1 + + if k % batch_size == 0: + label = tf.expand_dims(label, axis=-1) + loss_value = model.train(input_tensor=data, label=label, query=query_label, learning_rate=learning_rate, + z=z) + print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy()) + k = 0 + z = z + 1 + k = k + 1 + + val_loss = model.get_val_loss(val_data=val_data, val_label=val_label, val_query=val_query, batch_size=1) + SaveBestModel(model=model, history_loss=history_val_loss, loss_value=val_loss.numpy()) + history_val_loss.append(val_loss) + history_loss.append(loss_value.numpy()) + print('Training loss is :', loss_value.numpy()) + print('Validating loss is :', val_loss.numpy()) + if IsStopTraining(history_loss=history_val_loss, patience=7): + break + if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3): + learning_rate = 1e-4 + + +def lr_schedule(epoch): + # Learning Rate Schedule + + lr = 1e-3 + total_epochs = EPOCH + check_1 = int(total_epochs * 0.9) + check_2 = int(total_epochs * 0.8) + check_3 = int(total_epochs * 0.6) + check_4 = int(total_epochs * 
0.4) + + if epoch > check_1: + lr *= 1e-4 + elif epoch > check_2: + lr *= 1e-3 + elif epoch > check_3: + lr *= 1e-2 + elif epoch > check_4: + lr *= 1e-1 + + return lr + + +def split_data(train_data, train_label): + return train_data[:1150, :, :], train_label[:1150, :], train_data[-70:, :, :], train_label[-70:, :] + + +# +if __name__ == '__main__': + # 超参数设置 + filter_num = 50 # 时间部 + dims = 10 # 表示一个点的维度 + unit = 20 + batch_size = 32 + EPOCH = 1000 + model_name = 'self_LSTM' + predict_num = 50 + + # save_name = "../model/weight/{0}_unit{1}_FilterNum{2}_Dims{3}_Epoch{4}_weight/weight".format(model_name, unit, + # filter_num, dims, + # EPOCH) + save_name = "../model/{0}_unit{1}_FilterNum{2}_Dims{3}_Epoch{4}.h5".format(model_name, unit, + filter_num, dims, + EPOCH) + save_loss_name = "../model/loss/{0}_unit{1}_FilterNum{2}_Dims{3}_Epoch{4}_loss.npy".format(model_name, unit, + filter_num, + dims, + EPOCH) + predict_name = "../data/predict_data/{0}_FilterNum{1}_Dims{2}_BatchSize{3}_Epoch{4}_predict{5}.npy".format( + model_name, + filter_num, + dims, + batch_size, + EPOCH, + predict_num) + + predict_data, train_data, train_label, query_label, total_data, HI_merge_data_origin = getData( + filter_num=filter_num, dims=dims, batch_size=batch_size) + + model = predict_model() + checkpoint = tf.keras.callbacks.ModelCheckpoint( + filepath=save_name, + monitor='val_loss', + verbose=1, + save_best_only=True, + mode='min', + period=1) + lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.001) + + model.compile(optimizer=tf.optimizers.Adam(0.01), loss=FTMSE()) + model.summary() + early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=200, mode='min', verbose=1) + + train_data, train_label, val_data, val_label = split_data(train_data=train_data, train_label=train_label) + + history = model.fit(train_data, train_label, epochs=EPOCH, + batch_size=batch_size, validation_data=(val_data, val_label), shuffle=False, 
verbose=1, + callbacks=[checkpoint, lr_scheduler, early_stop]) + # # # # model.save(save_name) + # newModel = tf.keras.models.load_model(save_name, custom_objects={'LSTM_realize': LSTM_realize}) + + # fig4 = plt.figure() + # ax4 = fig4.add_subplot() + # plt.plot(history.epoch, history.history.get('loss'), label='loss') + # plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss') + # plt.show() + + # loss_folder = save_loss_name[:-4] + # folderGenerate(loss_folder) + # np.save(save_loss_name, history_loss) + + # newModel = PredictModel(filter_num=filter_num, dims=dims, batch_size=batch_size, query_label=query_label) + # newModel.load_weights(save_name) + # # history_loss=np.load(save_loss_name) + # # print(history_loss) + # + # + # # # ''' + # # # train_data.shape:(570,600,30) + # # # predict_data.shape:(571,600,30) + # # # train_label.shape:(570,600) + # # # query_label.shape:(570,600,1) # # # ''' + # # 连续预测五十个点并画出来 + # predict_num = 50 + # all_data = predictOneByOneWithBatch1(newModel=newModel, predict_data=predict_data, predict_num=predict_num) + # + # # all_data = predictContinued(predict_data=predict_data, predict_num=predict_num) + # # all_data = predictOneByOne(predict_data=predict_data, predict_num=predict_num) + # # all_data = selfPredictContinued(predict_data=predict_data, query_label=query_label, predict_num=predict_num) + # # all_data = selfPredictOneByOne(predict_data=predict_data, query_label=query_label, predict_num=predict_num) + # + # # 获取到的所有data进行画图 + # print(all_data.shape) # (700,600,1) + # (all_dims,) = all_data.shape + # all_x = np.arange(all_dims) + # print(all_x.shape) + # # np.save(predict_name, all_data) + # + # before_data = total_data + # (before_dims,) = before_data.shape + # before_x = np.arange(before_dims) + # all_x = np.arange(all_dims) + # + # fig5 = plt.figure() + # ax5 = fig5.add_subplot(2, 1, 1) + # ax5.plot(all_data) + # ax5.plot(before_data) + # ax51 = fig5.add_subplot(2, 1, 2) + # 
ax51.plot(HI_merge_data_origin[0:all_dims, 1]) + # plt.show() + # + # print("before_data.shape:", before_data.shape) + # print("flatten_all_data.shape:", all_data.shape) + # print("before_x.shape:", before_x.shape) + # print("all_x.shape:", all_x.shape) + # + # fig6 = plt.figure() + # ax6 = fig6.add_subplot() + # plt.plot(before_x, before_data) + # plt.scatter(before_x, before_data) + # plt.scatter(all_x, all_data) + # plt.show() + + # fig4 = plt.figure() + # ax4 = fig4.add_subplot() + # plt.plot(history.epoch, history.history.get('loss'), label='loss') + # # plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss') + # plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/__init__.py b/TensorFlow_eaxmple/Model_train_test/2012轴承数据集预测挑战/train/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/FFTUtils.py b/TensorFlow_eaxmple/Model_train_test/RUL/FFTUtils.py index 1ed73a4..0929169 100644 --- a/TensorFlow_eaxmple/Model_train_test/RUL/FFTUtils.py +++ b/TensorFlow_eaxmple/Model_train_test/RUL/FFTUtils.py @@ -9,6 +9,7 @@ import numpy as np import tensorflow as tf + ''' freq_value: 可以是时域通过np.fft.fft转换而来 ''' diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/ResultShowUtils.py b/TensorFlow_eaxmple/Model_train_test/RUL/ResultShowUtils.py new file mode 100644 index 0000000..30b17e0 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/RUL/ResultShowUtils.py @@ -0,0 +1,116 @@ +# -*- encoding:utf-8 -*- + +''' +@Author : dingjiawen +@Date : 2023/6/14 15:28 +@Usage : +@Desc : 一些画图方法 +''' +from sklearn.metrics import mean_absolute_error, mean_squared_error +from pylab import * + +# 图像上显示中文 +mpl.rcParams['font.sans-serif'] = ['SimHei'] +# 调整使图像支持负号 +mpl.rcParams["axes.unicode_minus"] = False + +font1 = { + 'family': 'Times New Roman', + 'weight': 'normal', + 'size': 12, +} + +font2 = { + 'family': 'Times New Roman', + 'weight': 'normal', + 'size': 15, +} + + +def 
calScore(y_test, pred): + # TODO 打印误差 + test_mse = round(mean_squared_error(y_test, pred), 4) + test_rmse = round(math.sqrt(mean_squared_error(y_test, pred)), 4) + # mape 暂时这样 + test_mape = round(mean_absolute_error(pred, y_test) * 100, 4) + test_mae = round(mean_absolute_error(pred, y_test), 4) + # TODO 计算得分 + result = list(np.squeeze(pred, 1)) + exceed = list(filter(lambda res: res >= y_test[-1], result)) + print("len(exceed)", len(exceed)) + if len(exceed) > 0: + exceed_index = result.index(exceed[0]) + print("len(result)", len(result)) + # Eri = round((((2750 - (len(result) - exceed_index)) - 2750) / 2750), 4) + Eri = round((exceed_index / len(result)), 4) + print("RUL", 100 - (len(result) - exceed_index)) + print("Eri", Eri) + + if Eri <= 0: + score = round(math.exp(-math.log(0.5, e) * (Eri / 5)), 4) + else: + score = round(math.exp(math.log(0.5, e) * (Eri / 20)), 4) + print("score1", score) + score = exceed_index / len(result) + else: + Eri = nan + score = nan + + print('MSE_testScore: %.4f MSE' % test_mse) + print('RMSE_testScore: %.4f RMSE' % test_rmse) + print('MAE_testScore: %.4f MAE' % test_mae) + print('MAPE_testScore: %.4f MAPE' % test_mape) + print("score: %.4f score" % score) + pass + + +# 画图 +def getPlot(data, feature, time_step, x_train, x_test, pred, truePred, train_pred, + saveName="../store/test"): + train_pred = np.squeeze(train_pred, 1) + print("train_pred", train_pred) + + # TODO 实际值 + # 设置xtick和ytick的方向:in、out、inout + plt.rcParams['xtick.direction'] = 'in' + plt.rcParams['ytick.direction'] = 'in' + plt.plot(list(range(data.shape[0])), data) + # 画出 y=1 这条水平线 + plt.axhline(data[-1], c='green') + plt.grid() + + plt.ylim(0, 1) + plt.xlim(-50, 1300) + + # TODO 真实预测散点图 + + # TODO 图2 + plt.figure(2) + point_len = x_train.shape[0] + feature + time_step - 1 + + # plt.figure(2, figsize=(12, 4)) + # 设置xtick和ytick的方向:in、out、inout + plt.rcParams['xtick.direction'] = 'in' + plt.rcParams['ytick.direction'] = 'in' + + print("pred", pred[:, -1]) + 
print("truePred", truePred[:, -1]) + figname2 = saveName + "single.png" + plt.scatter(list(range(data.shape[0])), data, c='blue', s=12, label='Actual value') + # # TODO 这里要改成Training value 10(重叠丢失) + 9(转置) +1141(训练数据已知) + 9(转置) = 1169 + 81 (预测数据) =1250 + # # 训练数据传入模型预测一次即为训练数据 + plt.plot(list(range(time_step + feature - 1, point_len)), train_pred, linewidth=2, color='red', + label='Training value') + + plt.scatter(list(range(point_len, point_len + x_test.shape[0])), pred, c='black', s=15, + label='Predictive value') + # 画出 y=1 这条水平线 + plt.axhline(data[-1], linewidth=2, c='green', label='Failure threshold') + plt.ylim(-0.2, 0.95) + plt.xlim(-50, 1300) + plt.xlabel("Serial number of the fusion feature point", font=font2) + plt.ylabel("Virtual health indicator", font=font2) + plt.legend(loc='upper left', prop=font1) + plt.savefig(figname2, ) + + diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/test/DCTNet.py b/TensorFlow_eaxmple/Model_train_test/RUL/test/DCTNet.py new file mode 100644 index 0000000..1dd1a71 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/RUL/test/DCTNet.py @@ -0,0 +1,138 @@ +#-*- encoding:utf-8 -*- + +''' +@Author : dingjiawen +@Date : 2023/6/18 11:10 +@Usage : +@Desc : +''' + +from distutils.command.config import config +import torch.nn as nn +import math +import numpy as np +import torch + +try: + from torch import irfft + from torch import rfft +except ImportError: + def rfft(x, d): + t = torch.fft.fft(x, dim=(-d)) + r = torch.stack((t.real, t.imag), -1) + return r + + + def irfft(x, d): + t = torch.fft.ifft(torch.complex(x[:, :, 0], x[:, :, 1]), dim=(-d)) + return t.real + + +def dct(x, norm=None): + """ + Discrete Cosine Transform, Type II (a.k.a. 
the DCT) + + For the meaning of the parameter `norm`, see: + https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html + + :param x: the input signal + :param norm: the normalization, None or 'ortho' + :return: the DCT-II of the signal over the last dimension + """ + x_shape = x.shape + N = x_shape[-1] + x = x.contiguous().view(-1, N) + + v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=1) + + # Vc = torch.fft.rfft(v, 1, onesided=False) + Vc = rfft(v, 1) + + k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N) + W_r = torch.cos(k) + W_i = torch.sin(k) + + V = Vc[:, :, 0] * W_r - Vc[:, :, 1] * W_i + + if norm == 'ortho': + V[:, 0] /= np.sqrt(N) * 2 + V[:, 1:] /= np.sqrt(N / 2) * 2 + + V = 2 * V.view(*x_shape) + + return V + + +# class senet_block(nn.Module): +# def __init__(self, channel=512, ratio=1): +# super(dct_channel_block, self).__init__() +# self.avg_pool = nn.AdaptiveAvgPool1d(1) #innovation +# self.fc = nn.Sequential( +# nn.Linear(channel, channel // 4, bias=False), +# nn.ReLU(inplace=True), +# nn.Linear(channel //4, channel, bias=False), +# nn.Sigmoid() +# ) + +# def forward(self, x): +# # b, c, l = x.size() # (B,C,L) +# # y = self.avg_pool(x) # (B,C,L) -> (B,C,1) +# # print("y",y.shape) +# x = x.permute(0,2,1) +# b, c, l = x.size() +# y = self.avg_pool(x).view(b, c) # (B,C,L) ->(B,C,1) +# # print("y",y.shape) +# # y = self.fc(y).view(b, c, 96) + +# y = self.fc(y).view(b,c,1) +# # print("y",y.shape) +# # return x * y +# return (x*y).permute(0,2,1) +class dct_channel_block(nn.Module): + def __init__(self, channel): + super(dct_channel_block, self).__init__() + # self.avg_pool = nn.AdaptiveAvgPool1d(1) #innovation + self.fc = nn.Sequential( + nn.Linear(channel, channel * 2, bias=False), + nn.Dropout(p=0.1), + nn.ReLU(inplace=True), + nn.Linear(channel * 2, channel, bias=False), + nn.Sigmoid() + ) + # self.dct_norm = nn.LayerNorm([512], eps=1e-6) + + self.dct_norm = nn.LayerNorm([96], eps=1e-6) # for 
lstm on length-wise + # self.dct_norm = nn.LayerNorm([36], eps=1e-6)#for lstm on length-wise on ill with input =36 + + def forward(self, x): + b, c, l = x.size() # (B,C,L) (32,96,512) + # y = self.avg_pool(x) # (B,C,L) -> (B,C,1) + + # y = self.avg_pool(x).view(b, c) # (B,C,L) -> (B,C,1) + # print("y",y.shape + # y = self.fc(y).view(b, c, 96) + list = [] + for i in range(c): + freq = dct(x[:, i, :]) + # print("freq-shape:",freq.shape) + list.append(freq) + + stack_dct = torch.stack(list, dim=1) + stack_dct = torch.tensor(stack_dct) + ''' + for traffic mission:f_weight = self.dct_norm(f_weight.permute(0,2,1))#matters for traffic datasets + ''' + + lr_weight = self.dct_norm(stack_dct) + lr_weight = self.fc(stack_dct) + lr_weight = self.dct_norm(lr_weight) + + # print("lr_weight",lr_weight.shape) + return x * lr_weight # result + + +if __name__ == '__main__': + tensor = torch.rand(8, 7, 96) + dct_model = dct_channel_block() + result = dct_model.forward(tensor) + print("result.shape:", result.shape) \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/test/DCTTest.py b/TensorFlow_eaxmple/Model_train_test/RUL/test/DCTTest.py new file mode 100644 index 0000000..503eda3 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/RUL/test/DCTTest.py @@ -0,0 +1,52 @@ +#-*- encoding:utf-8 -*- + +''' +@Author : dingjiawen +@Date : 2023/6/18 11:03 +@Usage : +@Desc : 离散余弦变换DCT测试 +''' + +''' +参考: +[1] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html +''' + + +import numpy as np +import cv2 +from scipy.fftpack import dct,idct +import matplotlib.pyplot as plt + +array = np.array([-0.029078494757, + -0.33095228672, + -0.12124221772, + 0.553512275219, + -0.158036053181, + 0.268739402294, + -0.638222515583, + 0.233140587807, + -0.173265621066, + 0.467218101025, + -0.372010827065, + -0.136630430818, + 0.343256533146, + 0.008932195604]) + +dct_array = dct(array,norm='ortho') +dct_array_t = idct(dct_array,norm='ortho') +amp = 
np.abs(np.fft.fft(array) / len(array)) + +print(amp) +print(dct_array) + + +plt.subplot(4, 1, 1) +plt.plot(array) +plt.subplot(4, 1, 2) +plt.plot(dct_array) +plt.subplot(4, 1, 3) +plt.plot(dct_array_t) +plt.subplot(4, 1, 4) +plt.plot(amp) +plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/test/FFTTest.py b/TensorFlow_eaxmple/Model_train_test/RUL/test/FFTTest.py index c6d2f3c..315c415 100644 --- a/TensorFlow_eaxmple/Model_train_test/RUL/test/FFTTest.py +++ b/TensorFlow_eaxmple/Model_train_test/RUL/test/FFTTest.py @@ -32,11 +32,11 @@ amp = np.abs(np.fft.fft(array) / len(array)) # 相角 angle = np.angle(np.fft.fft(array)) # 时部 -real = np.real(np.fft.rfft(array) * 2 / len(array)) +real = np.real(np.fft.fft(array) * 2 / len(array)) # 虚部 -imag = np.imag(np.fft.rfft(array) * 2 / len(array)) +imag = np.imag(np.fft.fft(array) * 2 / len(array)) # -angle1 = np.arctan(imag / real) +angle1 = np.arctan2(imag, real) print(angle) print(angle1) diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMContinueTest.py b/TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMContinueTest.py new file mode 100644 index 0000000..28e4e1c --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMContinueTest.py @@ -0,0 +1,268 @@ +# -*- encoding:utf-8 -*- + +''' +@Author : dingjiawen +@Date : 2023/6/14 14:56 +@Usage : +@Desc : 测试所实现的LSTM +''' + +import tensorflow as tf +import numpy as np +from model.LSTM.LSTMByDense import LSTMLayer as LSTMLayer +import matplotlib.pyplot as plt +from keras.callbacks import EarlyStopping + +from model.LossFunction.FTMSE import FTMSE +import math +from sklearn.metrics import mean_absolute_error, mean_squared_error +from pylab import * + +''' +超参数设置: +''' +hidden_num = 10 # LSTM细胞个数 +feature = 10 # 一个点的维度 +batch_size = 32 +EPOCH = 1000 +unit = 512 # LSTM的维度 +predict_num = 50 # 预测个数 +model_name = "LSTM" +save_name = r"selfMulti_norm_{0}_hidden{1}_unit{2}_feature{3}_predict{4}.h5".format(model_name, hidden_num, unit, + feature, + 
predict_num) + + +def standardization(data): + mu = np.mean(data, axis=0) + sigma = np.std(data, axis=0) + return (data - mu) / sigma + + +def normalization(data): + _range = np.max(data) - np.min(data) + return (data - np.min(data)) / _range + + +# LSTM_cell的数目,维度,是否正则化 +def getData(filter_num, dims, if_norm: bool = False): + # 数据读入 + HI_merge_data_origin = np.load("../../2012轴承数据集预测挑战/HI_create/HI_merge_data.npy") + + # plt.plot(HI_merge_data[0:1250, 1]) + # 去除掉退化特征不明显前面的点 + HI_merge_data = HI_merge_data_origin[0:1250, 1] + + # 是否正则化 + if if_norm: + HI_merge_data = normalization(HI_merge_data) + + # plt.plot(HI_merge_data) + # plt.show() + (total_dims,) = HI_merge_data.shape + + # # 将其分成重叠采样状态-滑动窗口函数 + predict_data = np.empty(shape=[total_dims - filter_num, filter_num]) + + # 重叠采样获取时间部和训练次数 + for dim in range(total_dims - filter_num): + predict_data[dim] = HI_merge_data[dim:dim + filter_num] + + train_label = predict_data[dims:, :] + train_label_single = HI_merge_data[dims + filter_num - 1:-1] + + # 再重叠采样获取一个点的维度 + '''train_data.shape:(sample,filter_num) -> (sample,filter_num,dims)''' + + # # 将其分成重叠采样状态-滑动窗口函数 + train_data = np.empty(shape=[dims, total_dims - filter_num - dims, filter_num]) + + for dim in range(dims): + train_data[dim] = predict_data[dim:total_dims - filter_num - dims + dim, :] + + # 转置变成想要的数据 (dims,sample,filter_num) -> (sample,filter_num,dims) + train_data = tf.transpose(train_data, [1, 2, 0]) + + # todo 解决模型保存时,query无法序列化的问题 + total_data = tf.cast(HI_merge_data, dtype=tf.float32) + train_data = tf.cast(train_data, dtype=tf.float32) + train_label = tf.cast(train_label, dtype=tf.float32) + train_label_single = tf.cast(train_label_single, dtype=tf.float32) + + print("total_data.shape:", total_data.shape) + print("train_data.shape:", train_data.shape) # (20, 1200, 30) + print("train_label.shape:", train_label.shape) # (20, 1200) + print("train_label_single.shape:", train_label_single.shape) + + # 所有的原始数据;所有的训练数据;所有的训练标签(预测一个序列);所有的训练标签(预测一个点) + 
return total_data, train_data, train_label, train_label_single + + +''' +train_data.shape: (total_dims - filter_num - 1, filter_num,dims) :(570,600,30) +predict_data.shape: (total_dims - filter_num, filter_num) :(571,600,30) +train_label.shape: (total_dims - filter_num - 1, filter_num) :(570,600) +''' + + +def remove(train_data, train_label, batch_size): + epoch, _, _ = train_data.shape + size = int(epoch / batch_size) + return train_data[:size * batch_size], train_label[:size * batch_size] + + +''' +train_data.shape: (1230, 10, 10) +train_label.shape: (1230, 10) +train_label_single.shape: (1230,) +''' + + +def splitValData(data, label, label_single, predict_num=50): + sample, hidden, feature = data.shape + + train_data = data[:sample - predict_num, :, :] + val_data = data[sample - predict_num:, :, :] + + train_label = label[:sample - predict_num, :] + val_label = label[sample - predict_num:, :] + + train_label_single = label_single[:sample - predict_num, ] + val_label_single = label_single[sample - predict_num:, ] + + return train_data, val_data, train_label, val_label, train_label_single, val_label_single + + +def predict_model_multi(filter_num, dims): + input = tf.keras.Input(shape=[filter_num, dims]) + input = tf.cast(input, tf.float32) + + #### 官方 + # LSTM = tf.keras.layers.LSTM(units=512, return_sequences=True)(input) + # LSTM = tf.keras.layers.LSTM(units=256, return_sequences=False)(LSTM) + + #### 自己 + LSTM = LSTMLayer(units=512, return_sequences=True)(input) + LSTM = LSTMLayer(units=256, return_sequences=True)(LSTM) + + ###flatten + x = tf.keras.layers.Flatten()(LSTM) + x = tf.keras.layers.Dense(128, activation="relu")(x) + x = tf.keras.layers.Dense(64, activation="relu")(x) + x = tf.keras.layers.Dropout(0.2)(x) + x = tf.keras.layers.BatchNormalization()(x) + x = tf.keras.layers.Dense(32, activation="relu")(x) + x = tf.keras.layers.Dropout(0.2)(x) + x = tf.keras.layers.BatchNormalization()(x) + # x = tf.keras.layers.Dense(16, activation="relu")(x) + output 
= tf.keras.layers.Dense(10, activation="relu", name='output')(x) + + model = tf.keras.Model(inputs=input, outputs=output) + return model + + +def split_data(train_data, train_label): + return train_data[:1150, :, :], train_label[:1150, :], train_data[-70:, :, :], train_label[-70:, :] + + +# 仅使用预测出来的最新的一个点预测以后 +def predictOneByOne(newModel, train_data, predict_num=50): + # 取出训练数据的最后一条 + each_predict_data = np.expand_dims(train_data[-1, :, :], axis=0) + predicted_list = np.empty(shape=(predict_num, 1)) # (5,filter_num,30) + # all_data = total_data # (1201,) + for each_predict in range(predict_num): + # predicted_data.shape : (1,1) + predicted_data = newModel.predict(each_predict_data) # (batch_size,filer_num,1) + predicted_list[each_predict] = predicted_data + # (1,1) => (10,1) + temp1 = np.transpose(np.concatenate([each_predict_data[:, 1:, -1], predicted_data], axis=1), [1, 0]) + + each_predict_data = np.expand_dims( + np.concatenate([np.squeeze(each_predict_data[:, :, 1:], axis=0), temp1], axis=1), axis=0) + + return predicted_list + + +# 使用最后预测出来的一整行与之前的拼接 +def predictContinueByOne(newModel, train_data, predict_num=50): + # 取出训练数据的最后一条 + each_predict_data = np.expand_dims(train_data[-1, :, :], axis=0) + predicted_list = np.empty(shape=(predict_num, 1)) # (5,filter_num,30) + + for each_predict in range(predict_num): + # predicted_data.shape : (1,10) 取最后一条 + predicted_data = newModel.predict(each_predict_data) # (batch_size,filer_num,1) + predicted_data = np.expand_dims(predicted_data[:, -1], axis=-1) + predicted_list[each_predict] = predicted_data + # (1,1) => (10,1)l + temp1 = np.transpose(np.concatenate([each_predict_data[:, 1:, -1], predicted_data], axis=1), [1, 0]) + + each_predict_data = np.expand_dims( + np.concatenate([np.squeeze(each_predict_data[:, :, 1:], axis=0), temp1], axis=1), axis=0) + + return predicted_list + + +# 不使用预测的数据,直接使用已知的数据持续预测 +def predictByEveryData(trained_model: tf.keras.Model, predict_data): + # shape:(1180,10) 取每一次的最后一个点就是从头到尾预测的 + 
predicted_data = trained_model.predict(predict_data) + predicted_data = np.expand_dims(predicted_data[:, -1], axis=-1) + + predicted_data = np.concatenate([np.expand_dims(total_data[:hidden_num + feature, ], axis=1), predicted_data], + axis=0) + data = predictContinueByOne(trained_model, predict_data, predict_num=predict_num) + predicted_data = np.concatenate([predicted_data, data], axis=0) + return predicted_data + pass + + +if __name__ == '__main__': + # 数据读取 + # 数据读入 --> 所有的原始数据;所有的训练数据;所有的训练标签(预测一个序列);所有的训练标签(预测一个点) + total_data, train_data, train_label, train_label_single = getData(hidden_num, feature, if_norm=False) + # 根据预测的点数划分训练集和测试集(验证集) + train_data, val_data, train_label, val_label, train_label_single, val_label_single = splitValData(train_data, + train_label, + train_label_single, + predict_num=predict_num) + # # #### TODO 训练 + model = predict_model_multi(hidden_num, feature) + checkpoint = tf.keras.callbacks.ModelCheckpoint( + filepath=save_name, + monitor='val_loss', + verbose=2, + save_best_only=True, + mode='min') + lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.001) + + model.compile(optimizer=tf.optimizers.SGD(), loss=tf.losses.mse) + model.summary() + early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=100, mode='min', verbose=1) + + history = model.fit(train_data, train_label, epochs=EPOCH, + batch_size=batch_size, validation_data=(val_data, val_label_single), shuffle=True, verbose=2, + callbacks=[checkpoint, lr_scheduler, early_stop]) + + #### TODO 测试 + + trained_model = tf.keras.models.load_model(save_name, custom_objects={'LSTMLayer': LSTMLayer}) + + # 使用已知的点进行预测 + predicted_data = predictByEveryData(trained_model, train_data) + # 使用预测的点持续预测 + # predicted_data = predictOneByOne(trained_model, total_data, train_data) + + print("predicted_data:", predicted_data) + print("predicted_data.shape:", predicted_data.shape) + + plt.figure(1) + plt.subplot(2, 1, 1) + 
plt.plot(total_data) + # plt.subplot(2, 1, 2) + plt.plot(predicted_data) + + # plt.scatter() + + plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMTest.py b/TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMTest.py new file mode 100644 index 0000000..9c5a595 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/RUL/test/LSTMTest.py @@ -0,0 +1,223 @@ +# -*- encoding:utf-8 -*- + +''' +@Author : dingjiawen +@Date : 2023/6/14 14:56 +@Usage : +@Desc : 测试所实现的LSTM +''' + +import tensorflow as tf +import numpy as np +from model.LSTM.LSTM import LSTMLayer as LSTMLayer +import matplotlib.pyplot as plt +from keras.callbacks import EarlyStopping + +from model.LossFunction.FTMSE import FTMSE +import math +from sklearn.metrics import mean_absolute_error, mean_squared_error +from pylab import * + +''' +超参数设置: +''' +hidden_num = 10 # LSTM细胞个数 +feature = 10 # 一个点的维度 +batch_size = 32 +EPOCH = 1000 +unit = 512 # LSTM的维度 +predict_num = 50 # 预测个数 +model_name = "LSTM" +save_name = r"self_{0}_hidden{1}_unit{2}_feature{3}_predict{4}.h5".format(model_name, hidden_num, unit, feature, + predict_num) + + +def getData(filter_num, dims): + # 数据读入 + HI_merge_data_origin = np.load("../../2012轴承数据集预测挑战/HI_create/HI_merge_data.npy") + + # plt.plot(HI_merge_data[0:1250, 1]) + # 去除掉退化特征不明显前面的点 + HI_merge_data = HI_merge_data_origin[0:1250, 1] + + # plt.plot(HI_merge_data) + # plt.show() + (total_dims,) = HI_merge_data.shape + + # # 将其分成重叠采样状态-滑动窗口函数 + predict_data = np.empty(shape=[total_dims - filter_num, filter_num]) + + # 重叠采样获取时间部和训练次数 + for dim in range(total_dims - filter_num): + predict_data[dim] = HI_merge_data[dim:dim + filter_num] + + train_label = predict_data[dims:, :] + train_label_single = HI_merge_data[dims + filter_num - 1:-1] + + # 再重叠采样获取一个点的维度 + '''train_data.shape:(sample,filter_num) -> (sample,filter_num,dims)''' + + # # 将其分成重叠采样状态-滑动窗口函数 + train_data = np.empty(shape=[dims, total_dims - filter_num - dims, filter_num]) + + for dim in range(dims): + 
train_data[dim] = predict_data[dim:total_dims - filter_num - dims + dim, :] + + # 转置变成想要的数据 (dims,sample,filter_num) -> (sample,filter_num,dims) + train_data = tf.transpose(train_data, [1, 2, 0]) + + # todo 解决模型保存时,query无法序列化的问题 + total_data = tf.cast(HI_merge_data, dtype=tf.float32) + train_data = tf.cast(train_data, dtype=tf.float32) + train_label = tf.cast(train_label, dtype=tf.float32) + train_label_single = tf.cast(train_label_single, dtype=tf.float32) + + print("total_data.shape:", total_data.shape) + print("train_data.shape:", train_data.shape) # (20, 1200, 30) + print("train_label.shape:", train_label.shape) # (20, 1200) + print("train_label_single.shape:", train_label_single.shape) + + # 所有的原始数据;所有的训练数据;所有的训练标签(预测一个序列);所有的训练标签(预测一个点) + return total_data, train_data, train_label, train_label_single + + +''' +train_data.shape: (total_dims - filter_num - 1, filter_num,dims) :(570,600,30) +predict_data.shape: (total_dims - filter_num, filter_num) :(571,600,30) +train_label.shape: (total_dims - filter_num - 1, filter_num) :(570,600) +''' + +def remove(train_data, train_label, batch_size): + epoch, _, _ = train_data.shape + size = int(epoch / batch_size) + return train_data[:size * batch_size], train_label[:size * batch_size] + + +''' +train_data.shape: (1230, 10, 10) +train_label.shape: (1230, 10) +train_label_single.shape: (1230,) +''' +def splitValData(data, label, label_single, predict_num=50): + sample, hidden, feature = data.shape + + train_data = data[:sample - predict_num, :, :] + val_data = data[sample - predict_num:, :, :] + + train_label = label[:sample - predict_num, :] + val_label = label[sample - predict_num:, :] + + train_label_single = label_single[:sample - predict_num, ] + val_label_single = label_single[sample - predict_num:, ] + + return train_data, val_data, train_label, val_label, train_label_single, val_label_single + + +def predict_model(filter_num, dims): + input = tf.keras.Input(shape=[filter_num, dims]) + input = tf.cast(input, 
tf.float32) + + #### 官方 + # LSTM = tf.keras.layers.LSTM(units=512, return_sequences=True)(input) + # LSTM = tf.keras.layers.LSTM(units=256, return_sequences=False)(LSTM) + + #### 自己 + LSTM = LSTMLayer(units=512, return_sequences=True)(input) + LSTM = LSTMLayer(units=256, return_sequences=False)(LSTM) + + x = tf.keras.layers.Dense(128, activation="relu")(LSTM) + x = tf.keras.layers.Dense(64, activation="relu")(x) + x = tf.keras.layers.Dropout(0.2)(x) + x = tf.keras.layers.BatchNormalization()(x) + x = tf.keras.layers.Dense(32, activation="relu")(x) + x = tf.keras.layers.Dropout(0.2)(x) + x = tf.keras.layers.BatchNormalization()(x) + x = tf.keras.layers.Dense(16, activation="relu")(x) + output = tf.keras.layers.Dense(1, activation="relu", name='output')(x) + + model = tf.keras.Model(inputs=input, outputs=output) + return model + + +def split_data(train_data, train_label): + return train_data[:1150, :, :], train_label[:1150, :], train_data[-70:, :, :], train_label[-70:, :] + + +# 仅使用预测出来的最新的一个点预测以后 +def predictOneByOne(newModel, train_data, predict_num=50): + # 取出训练数据的最后一条 + each_predict_data = np.expand_dims(train_data[-1, :, :], axis=0) + predicted_list = np.empty(shape=(predict_num, 1)) # (5,filter_num,30) + # all_data = total_data # (1201,) + for each_predict in range(predict_num): + # predicted_data.shape : (1,1) + predicted_data = newModel.predict(each_predict_data) # (batch_size,filer_num,1) + predicted_list[each_predict] = predicted_data + # (1,1) => (10,1) + temp1 = np.transpose(np.concatenate([each_predict_data[:, 1:, -1], predicted_data], axis=1), [1, 0]) + + each_predict_data = np.expand_dims( + np.concatenate([np.squeeze(each_predict_data[:, :, 1:], axis=0), temp1], axis=1), axis=0) + + return predicted_list + + +# 不使用预测的数据,直接使用已知的数据持续预测 +def predictByEveryData(trained_model: tf.keras.Model, predict_data): + predicted_data = trained_model.predict(predict_data) + predicted_data = np.concatenate([np.expand_dims(total_data[:hidden_num + feature, ], axis=1), 
predicted_data], + axis=0) + data = predictOneByOne(trained_model, predict_data) + predicted_data = np.concatenate([predicted_data, data], axis=0) + return predicted_data + pass + + +if __name__ == '__main__': + # 数据读取 + # 数据读入 --> 所有的原始数据;所有的训练数据;所有的训练标签(预测一个序列);所有的训练标签(预测一个点) + total_data, train_data, train_label, train_label_single = getData(hidden_num, feature) + # 根据预测的点数划分训练集和测试集(验证集) + train_data, val_data, train_label, val_label, train_label_single, val_label_single = splitValData(train_data, + train_label, + train_label_single, + predict_num=predict_num) + # # #### TODO 训练 + model = predict_model(hidden_num, feature) + checkpoint = tf.keras.callbacks.ModelCheckpoint( + filepath=save_name, + monitor='val_loss', + verbose=2, + save_best_only=True, + mode='min') + lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.001) + + model.compile(optimizer=tf.optimizers.SGD(), loss=tf.losses.mse) + model.summary() + early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=100, mode='min', verbose=1) + + history = model.fit(train_data, train_label_single, epochs=EPOCH, + batch_size=batch_size, validation_data=(val_data, val_label_single), shuffle=True, verbose=2, + callbacks=[checkpoint, lr_scheduler, early_stop]) + + #### TODO 测试 + + trained_model = tf.keras.models.load_model(save_name, custom_objects={'LSTMLayer': LSTMLayer}) + + # 使用已知的点进行预测 + predicted_data = predictByEveryData(trained_model, train_data) + # 使用预测的点持续预测 + # predicted_data = predictOneByOne(trained_model, total_data, train_data) + + print("predicted_data:", predicted_data) + print("predicted_data.shape:", predicted_data.shape) + + plt.figure(1) + plt.subplot(2, 1, 1) + plt.plot(total_data) + # plt.subplot(2, 1, 2) + plt.plot(predicted_data) + + # plt.scatter() + + plt.show() diff --git a/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/DCT_channelAttention.py 
b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/DCT_channelAttention.py new file mode 100644 index 0000000..3c6b693 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/DCT_channelAttention.py @@ -0,0 +1,74 @@ +# -*- encoding:utf-8 -*- + +''' +@Author : dingjiawen +@Date : 2023/6/18 14:08 +@Usage : +@Desc : DCT的通道注意力模块 +''' +import tensorflow as tf +import tensorflow.keras +from tensorflow.keras import * +import tensorflow.keras.layers as layers +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D + + +class DCTChannelAttention(layers.Layer): + def __init__(self): + # 调用父类__init__()方法 + super(DCTChannelAttention, self).__init__() + self.DWC = DepthwiseConv1D(kernel_size=1, padding='SAME') + + def build(self, input_shape): + if len(input_shape) != 3: + raise ValueError('Inputs to `DynamicChannelAttention` should have rank 3. ' + 'Received input shape:', str(input_shape)) + + # print(input_shape) + # GAP + self.GAP = tf.keras.layers.GlobalAvgPool1D() + self.c1 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME') + # s1 = tf.nn.sigmoid(c1) + + # GMP + self.GMP = tf.keras.layers.GlobalMaxPool1D() + self.c2 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME') + # s2 = tf.nn.sigmoid(c2) + + # weight + self.weight_kernel = self.add_weight( + shape=(1, input_shape[2]), + initializer='glorot_uniform', + name='weight_kernel') + + def call(self, inputs, **kwargs): + batch_size, length, channel = inputs.shape + # print(batch_size,length,channel) + DWC1 = self.DWC(inputs) + + # GAP + GAP = self.GAP(DWC1) + GAP = tf.expand_dims(GAP, axis=1) + c1 = self.c1(GAP) + c1 = tf.keras.layers.BatchNormalization()(c1) + s1 = tf.nn.sigmoid(c1) + + # GMP + GMP = self.GMP(DWC1) + GMP = tf.expand_dims(GMP, axis=1) + c2 = self.c2(GMP) + c2 = tf.keras.layers.BatchNormalization()(c2) + s2 = tf.nn.sigmoid(c2) + + # 
print(self.weight_kernel) + + weight_kernel = tf.broadcast_to(self.weight_kernel, shape=[length, channel]) + weight_kernel = tf.broadcast_to(weight_kernel, shape=[batch_size, length, channel]) + s1 = tf.broadcast_to(s1, shape=[batch_size, length, channel]) + s2 = tf.broadcast_to(s2, shape=[batch_size, length, channel]) + + output = tf.add(weight_kernel * s1 * inputs, (tf.ones_like(weight_kernel) - weight_kernel) * s2 * inputs) + return output diff --git a/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/Dynamic_channelAttention.py b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/Dynamic_channelAttention.py new file mode 100644 index 0000000..3cc7dba --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/Dynamic_channelAttention.py @@ -0,0 +1,129 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/12 17:48 +@Usage : +@Desc : +''' + +import tensorflow as tf +import tensorflow.keras +from tensorflow.keras import * +import tensorflow.keras.layers as layers +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D + +import keras.backend as K + + +class Between_0_1(tf.keras.constraints.Constraint): + def __call__(self, w): + # 调用父类__init__()方法 + super(Between_0_1, self).__init__() + return K.clip(w, 0, 1) + + +class DynamicChannelAttention(layers.Layer): + + def __init__(self): + # 调用父类__init__()方法 + super(DynamicChannelAttention, self).__init__() + self.DWC = DepthwiseConv1D(kernel_size=1, padding='SAME') + # self.DWC = DepthwiseConv1D(kernel_size=1, padding='causal',dilation_rate=4,data_format='channels_last') + + def build(self, input_shape): + if len(input_shape) != 3: + raise ValueError('Inputs to `DynamicChannelAttention` should have rank 3. 
' + 'Received input shape:', str(input_shape)) + + # print(input_shape) + # GAP + self.GAP = tf.keras.layers.GlobalAvgPool1D() + self.c1 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME') + # s1 = tf.nn.sigmoid(c1) + + # GMP + self.GMP = tf.keras.layers.GlobalMaxPool1D() + self.c2 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME') + # s2 = tf.nn.sigmoid(c2) + + # weight + self.weight_kernel = self.add_weight( + shape=(1, input_shape[2]), + initializer='glorot_uniform', + name='weight_kernel', + constraint=Between_0_1()) + + def call(self, inputs, **kwargs): + batch_size, length, channel = inputs.shape + # print(batch_size,length,channel) + DWC1 = self.DWC(inputs) + + + # GAP + GAP = self.GAP(DWC1) + GAP = tf.expand_dims(GAP, axis=1) + c1 = self.c1(GAP) + c1 = tf.keras.layers.BatchNormalization()(c1) + s1 = tf.nn.sigmoid(c1) + + # GMP + GMP = self.GMP(DWC1) + GMP = tf.expand_dims(GMP, axis=1) + c2 = self.c2(GMP) + c2 = tf.keras.layers.BatchNormalization()(c2) + s2 = tf.nn.sigmoid(c2) + + # print(self.weight_kernel) + + weight_kernel = tf.broadcast_to(self.weight_kernel, shape=[length, channel]) + weight_kernel = tf.broadcast_to(weight_kernel, shape=[batch_size, length, channel]) + s1 = tf.broadcast_to(s1, shape=[batch_size, length, channel]) + s2 = tf.broadcast_to(s2, shape=[batch_size, length, channel]) + + output = tf.add(weight_kernel * s1 * inputs, (tf.ones_like(weight_kernel) - weight_kernel) * s2 * inputs) + return output + + +class DynamicPooling(layers.Layer): + + def __init__(self, pool_size=2): + # 调用父类__init__()方法 + super(DynamicPooling, self).__init__() + self.pool_size = pool_size + pass + + def build(self, input_shape): + if len(input_shape) != 3: + raise ValueError('Inputs to `DynamicChannelAttention` should have rank 3. 
' + 'Received input shape:', str(input_shape)) + # GAP + self.AP = tf.keras.layers.AveragePooling1D(pool_size=self.pool_size) + + # GMP + self.MP = tf.keras.layers.MaxPool1D(pool_size=self.pool_size) + + # weight + self.weight_kernel = self.add_weight( + shape=(int(input_shape[1] / self.pool_size), input_shape[2]), + initializer='glorot_uniform', + name='weight_kernel', + constraint=Between_0_1()) + + def call(self, inputs, **kwargs): + batch_size, length, channel = inputs.shape + + # GAP + GAP = self.AP(inputs) + + # GMP + GMP = self.MP(inputs) + + weight_kernel = tf.broadcast_to(self.weight_kernel, shape=GMP.shape) + + output = tf.add(weight_kernel * GAP, (tf.ones_like(weight_kernel) - weight_kernel) * GMP) + return output diff --git a/TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/Light_channelAttention.py b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/Light_channelAttention.py similarity index 100% rename from TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/Light_channelAttention.py rename to TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/Light_channelAttention.py diff --git a/TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/SE_channelAttention.py b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/SE_channelAttention.py similarity index 100% rename from TensorFlow_eaxmple/Model_train_test/model/Dynamic_channelAttention/SE_channelAttention.py rename to TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/SE_channelAttention.py diff --git a/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/__init__.py new file mode 100644 index 0000000..e6b6376 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/ChannelAttention/__init__.py @@ -0,0 +1,9 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/12 17:48 +@Usage : +@Desc : +''' \ No newline at end of file diff 
--git a/TensorFlow_eaxmple/Model_train_test/model/CommonFunction/CommonFunction.py b/TensorFlow_eaxmple/Model_train_test/model/CommonFunction/CommonFunction.py new file mode 100644 index 0000000..c619e90 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/CommonFunction/CommonFunction.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- + +# coding: utf-8 + + +''' +@Author : dingjiawen +@Date : 2022/7/14 17:36 +@Usage : +@Desc : +''' + +import os +import shutil +import tensorflow as tf +import tensorflow.keras as keras +import numpy as np +import pandas as pd + + +def folderGenerate(folder_name): + if not os.path.exists(folder_name): + os.makedirs(folder_name) + # os.mkdir(folder_name) + + +# 递归删除文件夹 +def folderDelete(folder_name): + if os.path.exists(folder_name): + shutil.rmtree(folder_name) + + +# 判断这次是否进行模型保存,history_loss存储历史上的loss +def SaveBestModel(model, save_name, history_loss, loss_value, pattern: str = "min",epoch=0,is_all=False): + weight_folder = save_name[:-7] + if is_all: + weight_folder=weight_folder+'_epoch'+str(epoch)+"_"+str(loss_value) + save_name=weight_folder+save_name[-7:] + + # 如果history_loss为空,那么直接保存 + if len(history_loss) == 0: + folderGenerate(weight_folder) + model.save_weights(save_name) + return + + if pattern == "min": + # 先判断要不要存模型,如果上一次的比这一次的loss要大,就保存这一次的 + if np.min(history_loss) > loss_value: + # 删除上一次的保存这一次的 + folderDelete(weight_folder) + folderGenerate(weight_folder) + model.save_weights(save_name) + print("保存这次模型") + return + elif pattern == "max": + # 先判断要不要存模型,如果上一次的比这一次的loss要大,就保存这一次的 + if np.max(history_loss) < loss_value: + # 删除上一次的保存这一次的 + folderDelete(weight_folder) + folderGenerate(weight_folder) + model.save_weights(save_name) + print("保存这次模型") + return + else: + raise ValueError("算法尚未实现") + + pass + + +# 判断这次是否进行模型保存,history_loss存储历史上的loss +def SaveBestModelByAccuracy(model, save_name, history_accuracy, accuracy_value): + weight_folder = save_name[:-7] + + # 如果history_loss为空,那么直接保存 + if len(history_accuracy) == 0: 
+ folderGenerate(weight_folder) + model.save_weights(save_name) + return + + # 先判断要不要存模型,如果上一次的比这一次的loss要大,就保存这一次的 + if np.max(history_accuracy) < accuracy_value: + # 删除上一次的保存这一次的 + folderDelete(weight_folder) + folderGenerate(weight_folder) + model.save_weights(save_name) + print("保存这次模型") + return + + pass + + +# 判断这次是否进行模型保存,history_loss存储历史上的loss +def SaveBestH5Model(model: tf.keras.Model, save_name, history_loss, loss_value): + dirpath = os.path.dirname(save_name) + folderGenerate(dirpath) + # 如果history_loss为空,那么直接保存 + if len(history_loss) == 0: + model.save(save_name) + return + + # 先判断要不要存模型,如果上一次的比这一次的loss要大,就保存这一次的 + if np.min(history_loss) > loss_value: + # 删除上一次的保存这一次的 + model.save(save_name, overwrite=True) + print("保存这次模型") + return + + pass + + +def IsStopTraining(history_loss, patience=5, pattern: str = "min"): + if len(history_loss) <= patience: + return False + if pattern == "min": + for i in range(1, patience): + if history_loss[-(patience + 1)] > history_loss[-i]: + return False + elif pattern == "max": + for i in range(1, patience): + if history_loss[-(patience + 1)] > history_loss[-i]: + return False + else: + raise ValueError("算法尚未实现") + print(patience, "次loss未下降,训练停止") + return True + + +def shuffle(data, label): + label = tf.expand_dims(label, axis=-1) + total = tf.concat([data, label], axis=-1) + total = tf.random.shuffle(total) + data = total[:, :, :-1] + label = total[:, :, -1] + return data, label + + +def splitValData(data, label, val_radio=0.2): + size, filter_num, dims = data.shape + val_data = data[:int(size * val_radio), :, :] + train_data = data[int(size * val_radio):, :, :] + val_label = label[:int(size * val_radio), :] + train_label = label[int(size * val_radio):, :] + val_query = tf.expand_dims(val_label, axis=-1) + train_query = tf.expand_dims(train_label, axis=-1) + + return (train_data, train_label, train_query), (val_data, val_label, val_query) + + +def Is_Reduce_learning_rate(history_loss, patience=3, pattern: str = "min"): 
+ if len(history_loss) <= patience: + return False + if pattern == "min": + for i in range(patience): + if history_loss[-(patience + 1)] > history_loss[-i]: + return False + elif pattern == "max": + for i in range(patience): + if history_loss[-(patience + 1)] < history_loss[-i]: + return False + else: + raise ValueError("算法尚未实现") + print(patience, "次loss未下降,降低学习率") + return True \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/CommonFunction/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/CommonFunction/__init__.py new file mode 100644 index 0000000..eeabec8 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/CommonFunction/__init__.py @@ -0,0 +1,9 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/14 17:36 +@Usage : +@Desc : +''' \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/DepthwiseConv1D.py b/TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/DepthwiseConv1D.py new file mode 100644 index 0000000..d3217af --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/DepthwiseConv1D.py @@ -0,0 +1,247 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/12 16:59 +@Usage : +@Desc : +''' +import tensorflow as tf +from tensorflow.python.framework import tensor_shape +from tensorflow.python.keras import models, layers, initializers, regularizers, constraints +from tensorflow.python.keras.layers import Conv1D +from tensorflow.python.keras.engine.input_spec import InputSpec +from tensorflow.python.keras.utils import conv_utils +from tensorflow.python.keras.utils import tf_utils +from tensorflow.python.keras import backend +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import nn_ops +from tensorflow.python.util.tf_export import keras_export + +""" + Depthwise separable 1D convolution. 
+ Depthwise Separable convolutions consists in performing + just the first step in a depthwise spatial convolution + (which acts on each input channel separately). + The `depth_multiplier` argument controls how many + output channels are generated per input channel in the depthwise step. + Arguments: + kernel_size: An integer, specifying the + length of the 1D convolution window. + strides: An integer specifying the strides of the convolution. + padding: one of `'valid'` or `'same'` (case-insensitive). + common_kernel: if set to True, same kernel is applied to each channel, + if False, separate kernel is applied to each channel (default case) + depth_multiplier: The number of depthwise convolution output channels + for each input channel. + The total number of depthwise convolution output + channels will be equal to `filters_in * depth_multiplier`. + data_format: A string, + one of `channels_last` (default) or `channels_first`. + The ordering of the dimensions in the inputs. + `channels_last` corresponds to inputs with shape + `(batch_size, length, channels)` while `channels_first` + corresponds to inputs with shape + `(batch_size, channels, length)`. + It defaults to the `image_data_format` value found in your + Keras config file at `~/.keras/keras.json`. + If you never set it, then it will be 'channels_last'. + activation: Activation function to use. + If you don't specify anything, no activation is applied ( + see `keras.activations`). + use_bias: Boolean, whether the layer uses a bias vector. + depthwise_initializer: Initializer for the depthwise kernel matrix ( + see `keras.initializers`). + bias_initializer: Initializer for the bias vector ( + see `keras.initializers`). + depthwise_regularizer: Regularizer function applied to + the depthwise kernel matrix (see `keras.regularizers`). + bias_regularizer: Regularizer function applied to the bias vector ( + see `keras.regularizers`). 
+ activity_regularizer: Regularizer function applied to + the output of the layer (its 'activation') ( + see `keras.regularizers`). + depthwise_constraint: Constraint function applied to + the depthwise kernel matrix ( + see `keras.constraints`). + bias_constraint: Constraint function applied to the bias vector ( + see `keras.constraints`). + Input shape: + 3D tensor with shape: + `[batch_size, channels, length]` if data_format='channels_first' + or 3D tensor with shape: + `[batch_size, length, channels]` if data_format='channels_last'. + Output shape: + 3D tensor with shape: + `[batch_size, filters, new_length]` if data_format='channels_first' + or 3D tensor with shape: + `[batch_size, new_length, filters]` if data_format='channels_last'. + `length` value might have changed due to padding. + Returns: + A tensor of rank 3 representing + `activation(depthwiseconv1d(inputs, kernel) + bias)`. + """ + + +class DepthwiseConv1D(Conv1D): + def __init__(self, + kernel_size, + strides=1, + padding='valid', + common_kernel=False, + depth_multiplier=1, + data_format=None, + activation=None, + use_bias=True, + depthwise_initializer='glorot_uniform', + bias_initializer='zeros', + depthwise_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + depthwise_constraint=None, + bias_constraint=None, + dilation_rate=1, + **kwargs): + super(DepthwiseConv1D, self).__init__( + filters=None, + kernel_size=kernel_size, + strides=strides, + padding=padding, + data_format=data_format, + activation=activation, + use_bias=use_bias, + bias_regularizer=bias_regularizer, + activity_regularizer=activity_regularizer, + bias_constraint=bias_constraint, + **kwargs) + + self.common_kernel = common_kernel + self.depth_multiplier = depth_multiplier + self.depthwise_initializer = initializers.get(depthwise_initializer) + self.depthwise_regularizer = regularizers.get(depthwise_regularizer) + self.depthwise_constraint = constraints.get(depthwise_constraint) + self.bias_initializer = 
initializers.get(bias_initializer) + self.dilation_rate = (dilation_rate,dilation_rate) + + # For compatibility with some older versions of Keras + def _get_channel_axis(self): + if self.data_format == 'channels_first': + return 1 + else: + return -1 + + def build(self, input_shape): + if len(input_shape) != 3: + raise ValueError('Inputs to `DepthwiseConv1D` should have rank 3. ' + 'Received input shape:', str(input_shape)) + input_shape = tensor_shape.TensorShape(input_shape) + channel_axis = self._get_channel_axis() + if input_shape.dims[channel_axis].value is None: + raise ValueError('The channel dimension of the inputs to ' + '`DepthwiseConv1D` should be defined. Found `None`.') + input_dim = int(input_shape[channel_axis]) + kernel_dim = 1 if self.common_kernel == True else input_dim + depthwise_kernel_shape = (self.kernel_size[0], kernel_dim, self.depth_multiplier) + + self.channels = input_dim + + self.depthwise_kernel = self.add_weight( + shape=depthwise_kernel_shape, + initializer=self.depthwise_initializer, + name='depthwise_kernel', + regularizer=self.depthwise_regularizer, + constraint=self.depthwise_constraint) + + if self.use_bias: + self.bias = self.add_weight(shape=(kernel_dim * self.depth_multiplier,), + initializer=self.bias_initializer, + name='bias', + regularizer=self.bias_regularizer, + constraint=self.bias_constraint) + else: + self.bias = None + self.input_spec = InputSpec(ndim=3, axes={channel_axis: input_dim}) + self.built = True + + def call(self, inputs): + if self.padding == 'causal': + inputs = array_ops.pad(inputs, self._compute_causal_padding()) + if self.data_format == 'channels_last': + strides = (1,) + self.strides * 2 + (1,) + spatial_start_dim = 1 + else: + strides = (1, 1) + self.strides * 2 + spatial_start_dim = 2 + + # Explicitly broadcast inputs and kernels to 4D. 
+ inputs = array_ops.expand_dims(inputs, spatial_start_dim) + + if self.common_kernel == True: + # Need to replicate kernel {channels} times over axis 1 + dw_kernel = tf.tile(self.depthwise_kernel, (1, self.channels, 1)) + bias_kernel = tf.tile(self.bias, (self.channels,)) + else: + dw_kernel = self.depthwise_kernel + bias_kernel = self.bias + + dw_kernel = array_ops.expand_dims(dw_kernel, 0) + + if self.padding == 'causal': + op_padding = 'valid' + else: + op_padding = self.padding + outputs = nn.depthwise_conv2d( + inputs, + dw_kernel, + strides=strides, + padding=op_padding.upper(), + data_format=conv_utils.convert_data_format(self.data_format, ndim=4)) + # outputs = backend.depthwise_conv2d( + # inputs, + # dw_kernel, + # strides=strides, + # padding=op_padding.upper(), + # dilation_rate=self.dilation_rate, + # data_format='channels_last') + + outputs = array_ops.squeeze(outputs, [spatial_start_dim]) + + if self.use_bias: + outputs = backend.bias_add(outputs, bias_kernel, data_format=self.data_format) + + if self.activation is not None: + return self.activation(outputs) + + return outputs + + @tf_utils.shape_type_conversion + def compute_output_shape(self, input_shape): + if self.data_format == 'channels_first': + length = input_shape[2] + out_filters = input_shape[1] * self.depth_multiplier + elif self.data_format == 'channels_last': + length = input_shape[1] + out_filters = input_shape[2] * self.depth_multiplier + + length_new = conv_utils.conv_output_length(length, self.kernel_size, + self.padding, + self.strides) + + if self.data_format == 'channels_first': + return (input_shape[0], out_filters, length_new) + elif self.data_format == 'channels_last': + return (input_shape[0], length_new, out_filters) + + def get_config(self): + config = super(DepthwiseConv1D, self).get_config() + config.pop('filters') + config.pop('kernel_initializer') + config.pop('kernel_regularizer') + config.pop('kernel_constraint') + config['depth_multiplier'] = self.depth_multiplier + 
config['depthwise_initializer'] = initializers.serialize(self.depthwise_initializer) + config['depthwise_regularizer'] = regularizers.serialize(self.depthwise_regularizer) + config['depthwise_constraint'] = constraints.serialize(self.depthwise_constraint) + return config \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/__init__.py new file mode 100644 index 0000000..d7df0ff --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/DepthwiseCon1D/__init__.py @@ -0,0 +1,9 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/12 16:59 +@Usage : +@Desc : +''' \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring.py b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring.py new file mode 100644 index 0000000..87b0ac4 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring.py @@ -0,0 +1,402 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/14 9:40 +@Usage : 联合监测模型 +@Desc : 将CNN特征提取结果放入分类器 +''' + +import tensorflow as tf +import tensorflow.keras as keras +from tensorflow.keras import * +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D +from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling +from condition_monitoring.data_deal import loadData +from model.LossFunction.smooth_L1_Loss import SmoothL1Loss + + +class Joint_Monitoring(keras.Model): + + def __init__(self, conv_filter=20): + # 调用父类__init__()方法 + super(Joint_Monitoring, self).__init__() + # step one + self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5) + self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME') + self.upsample1 = 
tf.keras.layers.UpSampling1D(size=2) + + self.DACU2 = DynamicChannelAttention() + self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3) + self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME') + self.upsample2 = tf.keras.layers.UpSampling1D(size=2) + + self.DACU3 = DynamicChannelAttention() + self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3) + self.p1 = DynamicPooling(pool_size=2) + self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME') + + self.DACU4 = DynamicChannelAttention() + self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3) + self.p2 = DynamicPooling(pool_size=4) + self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME') + + self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3) + self.p3 = DynamicPooling(pool_size=2) + + # step two + # 重现原数据 + self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False) + self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False) + self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False) + self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + # step three + # 分类器 + self.d4 = tf.keras.layers.Dense(1024, activation=tf.nn.leaky_relu) + self.d5 = tf.keras.layers.Dense(512, activation=tf.nn.leaky_relu) + # tf.nn.softmax + self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid) + + # loss + self.train_loss = [] + + def call(self, inputs, training=None, mask=None, is_first_time: bool = True): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs) + RepDCBlock1 = 
tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + upsample1 = self.upsample1(conv1) + + DACU2 = self.DACU2(upsample1) + DACU2 = tf.keras.layers.BatchNormalization()(DACU2) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(DACU3) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], axis=1) + + DACU4 = self.DACU4(concat1) + DACU4 = tf.keras.layers.BatchNormalization()(DACU4) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = self.RepDCBlock5(concat2) + RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5) + + output1 = [] + output2 = [] + output3 = [] + output4 = [] + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + 
else: + # 多尺度动态池化 + # 多尺度动态池化 + p1 = self.p1(RepDCBlock3) + B, _, _ = p1.shape + f1 = tf.reshape(p1, shape=[B, -1]) + p2 = self.p2(RepDCBlock4) + f2 = tf.reshape(p2, shape=[B, -1]) + p3 = self.p3(RepDCBlock5) + f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([f1, f2, f3], axis=0) + d4 = self.d4(concat3) + d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d4) + + return RepDCBlock3, RepDCBlock4, RepDCBlock5, output4 + + def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs_tensor) + RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + upsample1 = self.upsample1(conv1) + + DACU2 = self.DACU2(upsample1) + DACU2 = tf.keras.layers.BatchNormalization()(DACU2) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(DACU3) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], axis=1) + + DACU4 = self.DACU4(concat1) + DACU4 = tf.keras.layers.BatchNormalization()(DACU4) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = 
self.RepDCBlock5(concat2) + RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5) + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # reduce_mean降维计算均值 + MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1)) + MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2)) + MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3)) + + print("MSE_loss1:", MSE_loss1.numpy()) + print("MSE_loss2:", MSE_loss2.numpy()) + print("MSE_loss3:", MSE_loss3.numpy()) + loss = MSE_loss1 + MSE_loss2 + MSE_loss3 + else: + # 多尺度动态池化 + p1 = self.p1(RepDCBlock3) + B, _, _ = p1.shape + f1 = tf.reshape(p1, shape=[B, -1]) + p2 = self.p2(RepDCBlock4) + f2 = tf.reshape(p2, shape=[B, -1]) + p3 = self.p3(RepDCBlock5) + f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([f1, f2, f3], axis=1) + # dropout = tf.keras.layers.Dropout(0.25)(concat3) + d4 = self.d4(concat3) + d5 = self.d5(d4) + # d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d5) + + # reduce_mean降维计算均值 + MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=RepDCBlock3) + MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=RepDCBlock4) + MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=RepDCBlock5) + Cross_Entropy_loss = tf.reduce_mean( + tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True)) + + print("MSE_loss:", MSE_loss.numpy()) + print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy()) + Accuracy_num = 
self.get_Accuracy(label=label2, output=output4) + loss = MSE_loss + 100 * Cross_Entropy_loss + return loss, Accuracy_num + + def get_Accuracy(self, output, label): + + predict_label = tf.round(output) + label = tf.cast(label, dtype=tf.float32) + + t = np.array(label - predict_label) + + b = t[t[:] == 0] + + return b.__len__() + + def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + with tf.GradientTape() as tape: + # todo 原本tape只会监控由tf.Variable创建的trainable=True属性 + # tape.watch(self.variables) + L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, pred_5=pred_5) + # 保存一下loss,用于输出 + self.train_loss = L + g = tape.gradient(L, self.variables) + return g, Accuracy_num + + def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None, + pred_4=None, pred_5=None): + g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, pred_5=pred_5) + optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables)) + return self.train_loss, Accuracy_num + + # 暂时只支持batch_size等于1,不然要传z比较麻烦 + def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True, + step_one_model=None): + val_loss = [] + accuracy_num = 0 + size, length, dims = val_data.shape + if batch_size == None: + batch_size = self.batch_size + for epoch in range(0, size - batch_size, batch_size): + each_val_data = val_data[epoch:epoch + batch_size, :, :] + each_val_label1 = val_label1[epoch:epoch + batch_size, :] + each_val_label2 = val_label2[epoch:epoch + batch_size, ] + # each_val_data = tf.expand_dims(each_val_data, axis=0) + # each_val_query = tf.expand_dims(each_val_query, axis=0) + # each_val_label = tf.expand_dims(each_val_label, axis=0) + if not is_first_time: + output1, 
class RevConv(keras.layers.Layer):
    """RepVGG-style re-parameterisable 1-D conv unit.

    Three parallel branches — a k-sized dilated causal conv, a 1x1 dilated
    causal conv, and the identity — are each batch-normalised, summed, and
    passed through ReLU. Output channel count equals input channel count.
    """

    def __init__(self, kernel_size=3):
        # Call the parent __init__() first so the layer is registered.
        super(RevConv, self).__init__()
        self.kernel_size = kernel_size

    def get_config(self):
        # Serialise the custom attribute alongside the base layer config.
        config = {'kernel_size': self.kernel_size}
        base_config = super(RevConv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        # input_shape: (batch, length, channels); keep channels unchanged.
        output_dim = input_shape[2]
        self.conv1 = tf.keras.layers.Conv1D(filters=output_dim,
                                            kernel_size=self.kernel_size,
                                            strides=1, padding='causal',
                                            dilation_rate=4)
        self.conv2 = tf.keras.layers.Conv1D(filters=output_dim,
                                            kernel_size=1,
                                            strides=1, padding='causal',
                                            dilation_rate=4)
        # BUG FIX: the BatchNormalization layers were previously created
        # inside call(), which instantiates fresh (untrained, untracked)
        # parameters on every forward pass. They must be created once here
        # so their weights and moving statistics persist across calls.
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.bn3 = tf.keras.layers.BatchNormalization()

    def call(self, inputs, **kwargs):
        branch1 = tf.nn.leaky_relu(self.bn1(self.conv1(inputs)))
        branch2 = tf.nn.leaky_relu(self.bn2(self.conv2(inputs)))
        branch3 = self.bn3(inputs)  # identity branch, BN only
        out = tf.keras.layers.Add()([branch1, branch2, branch3])
        return tf.nn.relu(out)


class RevConvBlock(keras.layers.Layer):
    """Sequential stack of `num` RevConv units sharing one kernel size."""

    def __init__(self, num: int = 3, kernel_size=3):
        # Call the parent __init__() first so sub-layers get tracked.
        super(RevConvBlock, self).__init__()
        self.num = num
        self.kernel_size = kernel_size
        self.L = [RevConv(kernel_size=kernel_size) for _ in range(num)]

    def get_config(self):
        # Serialise the custom attributes alongside the base layer config.
        config = {'kernel_size': self.kernel_size, 'num': self.num}
        base_config = super(RevConvBlock, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def call(self, inputs, **kwargs):
        out = inputs
        for layer in self.L:
            out = layer(out)
        return out
kernel_size=1, strides=2, padding='SAME') + self.upsample2 = tf.keras.layers.UpSampling1D(size=2) + + self.DACU3 = DynamicChannelAttention() + self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3) + self.p1 = DynamicPooling(pool_size=2) + self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME') + + self.DACU4 = DynamicChannelAttention() + self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3) + self.p2 = DynamicPooling(pool_size=4) + self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME') + + self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3) + self.p3 = DynamicPooling(pool_size=2) + + # step two + # 重现原数据 + self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False) + self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False) + self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False) + self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + # step three + # 分类器 + self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + # tf.nn.softmax + self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid) + + # loss + self.train_loss = [] + + def call(self, inputs, training=None, mask=None, is_first_time: bool = True): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs) + RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + upsample1 = self.upsample1(conv1) + + DACU2 = self.DACU2(upsample1) + DACU2 
= tf.keras.layers.BatchNormalization()(DACU2) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(DACU3) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], axis=1) + + DACU4 = self.DACU4(concat1) + DACU4 = tf.keras.layers.BatchNormalization()(DACU4) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = self.RepDCBlock5(concat2) + RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5) + + output1 = [] + output2 = [] + output3 = [] + output4 = [] + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + else: + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = 
tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # 多尺度动态池化 + # p1 = self.p1(output1) + # B, _, _ = p1.shape + # f1 = tf.reshape(p1, shape=[B, -1]) + # p2 = self.p2(output2) + # f2 = tf.reshape(p2, shape=[B, -1]) + # p3 = self.p3(output3) + # f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([output1, output2, output3], axis=1) + # dropout = tf.keras.layers.Dropout(0.25)(concat3) + d4 = self.d4(concat3) + # d5 = self.d5(d4) + # d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d4) + + return output1, output2, output3, output4 + + def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs_tensor) + RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + upsample1 = self.upsample1(conv1) + + DACU2 = self.DACU2(upsample1) + DACU2 = tf.keras.layers.BatchNormalization()(DACU2) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(DACU3) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], axis=1) + + DACU4 = self.DACU4(concat1) + DACU4 = 
tf.keras.layers.BatchNormalization()(DACU4) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = self.RepDCBlock5(concat2) + RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5) + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # reduce_mean降维计算均值 + MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1)) + MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2)) + MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3)) + + print("MSE_loss1:", MSE_loss1.numpy()) + print("MSE_loss2:", MSE_loss2.numpy()) + print("MSE_loss3:", MSE_loss3.numpy()) + loss = MSE_loss1 + MSE_loss2 + MSE_loss3 + Accuracy_num=0 + else: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # 多尺度动态池化 + # p1 = self.p1(output1) + # B, _, _ = 
p1.shape + # f1 = tf.reshape(p1, shape=[B, -1]) + # p2 = self.p2(output2) + # f2 = tf.reshape(p2, shape=[B, -1]) + # p3 = self.p3(output3) + # f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([output1, output2, output3], axis=1) + # dropout = tf.keras.layers.Dropout(0.25)(concat3) + d4 = self.d4(concat3) + # d5=self.d5(d4) + # d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d4) + + # reduce_mean降维计算均值 + MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1) + MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2) + MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3) + Cross_Entropy_loss = tf.reduce_mean( + tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True)) + + print("MSE_loss:", MSE_loss.numpy()) + print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy()) + Accuracy_num = self.get_Accuracy(label=label2, output=output4) + loss = MSE_loss + Cross_Entropy_loss + + return loss, Accuracy_num + + def get_Accuracy(self, output, label): + + predict_label = tf.round(output) + label = tf.cast(label, dtype=tf.float32) + + t = np.array(label - predict_label) + + b = t[t[:] == 0] + + return b.__len__() + + def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + with tf.GradientTape() as tape: + # todo 原本tape只会监控由tf.Variable创建的trainable=True属性 + # tape.watch(self.variables) + L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, pred_5=pred_5) + # 保存一下loss,用于输出 + self.train_loss = L + g = tape.gradient(L, self.variables) + return g, Accuracy_num + + def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None, + pred_4=None, pred_5=None): + g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, 
pred_5=pred_5) + optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables)) + return self.train_loss, Accuracy_num + + # 暂时只支持batch_size等于1,不然要传z比较麻烦 + def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True, + step_one_model=None): + val_loss = [] + accuracy_num = 0 + size, length, dims = val_data.shape + if batch_size == None: + batch_size = self.batch_size + for epoch in range(0, size - batch_size, batch_size): + each_val_data = val_data[epoch:epoch + batch_size, :, :] + each_val_label1 = val_label1[epoch:epoch + batch_size, :] + each_val_label2 = val_label2[epoch:epoch + batch_size, ] + # each_val_data = tf.expand_dims(each_val_data, axis=0) + # each_val_query = tf.expand_dims(each_val_query, axis=0) + # each_val_label = tf.expand_dims(each_val_label, axis=0) + if not is_first_time: + output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True) + + each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2, + is_first_time=is_first_time, + pred_3=output1, pred_4=output2, pred_5=output3) + accuracy_num += each_accuracy_num + val_loss.append(each_loss) + # print(accuracy_num) + val_accuracy = accuracy_num / (epoch+1) * batch_size + val_total_loss = tf.reduce_mean(val_loss) + return val_total_loss, val_accuracy + + +class RevConv(keras.layers.Layer): + + def __init__(self, kernel_size=3): + # 调用父类__init__()方法 + super(RevConv, self).__init__() + self.kernel_size = kernel_size + + def get_config(self): + # 自定义层里面的属性 + config = ( + { + 'kernel_size': self.kernel_size + } + ) + base_config = super(RevConv, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def build(self, input_shape): + # print(input_shape) + _, _, output_dim = input_shape[0], input_shape[1], input_shape[2] + self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1, + padding='causal', + dilation_rate=4) + + 
self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal', + dilation_rate=4) + # self.b2 = tf.keras.layers.BatchNormalization() + + # self.b3 = tf.keras.layers.BatchNormalization() + + # out = tf.keras.layers.Add()([b1, b2, b3]) + # out = tf.nn.relu(out) + + def call(self, inputs, **kwargs): + conv1 = self.conv1(inputs) + b1 = tf.keras.layers.BatchNormalization()(conv1) + b1 = tf.nn.leaky_relu(b1) + # b1 = self.b1 + + conv2 = self.conv2(inputs) + b2 = tf.keras.layers.BatchNormalization()(conv2) + b2 = tf.nn.leaky_relu(b2) + + b3 = tf.keras.layers.BatchNormalization()(inputs) + + out = tf.keras.layers.Add()([b1, b2, b3]) + out = tf.nn.relu(out) + + return out + + +class RevConvBlock(keras.layers.Layer): + + def __init__(self, num: int = 3, kernel_size=3): + # 调用父类__init__()方法 + super(RevConvBlock, self).__init__() + self.num = num + self.kernel_size = kernel_size + self.L = [] + for i in range(num): + RepVGG = RevConv(kernel_size=kernel_size) + self.L.append(RepVGG) + + def get_config(self): + # 自定义层里面的属性 + config = ( + { + 'kernel_size': self.kernel_size, + 'num': self.num + } + ) + base_config = super(RevConvBlock, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def call(self, inputs, **kwargs): + for i in range(self.num): + inputs = self.L[i](inputs) + return inputs diff --git a/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring3.py b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring3.py new file mode 100644 index 0000000..22d362a --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/Joint_Monitoring3.py @@ -0,0 +1,453 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/14 9:40 +@Usage : 联合监测模型 +@Desc : 将预测值放入分类器,分类器放两层逐渐递减的dense层 +''' + +import tensorflow as tf +import tensorflow.keras as keras +from tensorflow.keras import * +import numpy as np +import pandas as pd +import 
matplotlib.pyplot as plt +from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D +from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling +from condition_monitoring.data_deal import loadData +from model.LossFunction.smooth_L1_Loss import SmoothL1Loss +import math + + +class Joint_Monitoring(keras.Model): + + def __init__(self, conv_filter=20): + # 调用父类__init__()方法 + super(Joint_Monitoring, self).__init__() + # step one + self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5) + self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME') + self.upsample1 = tf.keras.layers.UpSampling1D(size=2) + + self.DACU2 = DynamicChannelAttention() + self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3) + self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME') + self.upsample2 = tf.keras.layers.UpSampling1D(size=2) + + self.DACU3 = DynamicChannelAttention() + self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3) + self.p1 = DynamicPooling(pool_size=2) + self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME') + + self.DACU4 = DynamicChannelAttention() + self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3) + self.p2 = DynamicPooling(pool_size=4) + self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME') + + self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3) + self.p3 = DynamicPooling(pool_size=2) + + # step two + # 重现原数据 + self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False) + self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False) + self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU3 = 
tf.keras.layers.GRU(128, return_sequences=False) + self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + # step three + # 分类器 + self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + # tf.nn.softmax + self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid) + + # loss + self.train_loss = [] + + def call(self, inputs, training=None, mask=None, is_first_time: bool = True): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs) + RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + upsample1 = self.upsample1(conv1) + + DACU2 = self.DACU2(upsample1) + DACU2 = tf.keras.layers.BatchNormalization()(DACU2) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(DACU3) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], axis=1) + + DACU4 = self.DACU4(concat1) + DACU4 = tf.keras.layers.BatchNormalization()(DACU4) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = self.RepDCBlock5(concat2) + RepDCBlock5 = 
tf.keras.layers.BatchNormalization()(RepDCBlock5) + + output1 = [] + output2 = [] + output3 = [] + output4 = [] + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + else: + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # 多尺度动态池化 + # p1 = self.p1(output1) + # B, _, _ = p1.shape + # f1 = tf.reshape(p1, shape=[B, -1]) + # p2 = self.p2(output2) + # f2 = tf.reshape(p2, shape=[B, -1]) + # p3 = self.p3(output3) + # f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([output1, output2, output3], axis=1) + # dropout = tf.keras.layers.Dropout(0.25)(concat3) + d4 = self.d4(concat3) + d5 = self.d5(d4) + # d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d5) + + return output1, output2, output3, output4 + + def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs_tensor) + RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = 
tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + upsample1 = self.upsample1(conv1) + + DACU2 = self.DACU2(upsample1) + DACU2 = tf.keras.layers.BatchNormalization()(DACU2) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(DACU3) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], axis=1) + + DACU4 = self.DACU4(concat1) + DACU4 = tf.keras.layers.BatchNormalization()(DACU4) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = self.RepDCBlock5(concat2) + RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5) + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # reduce_mean降维计算均值 + MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1)) + MSE_loss2 = 
tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2)) + MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3)) + + # print("MSE_loss1:", MSE_loss1.numpy()) + # print("MSE_loss2:", MSE_loss2.numpy()) + # print("MSE_loss3:", MSE_loss3.numpy()) + loss = MSE_loss1 + MSE_loss2 + MSE_loss3 + Accuracy_num = 0 + + else: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # 多尺度动态池化 + # p1 = self.p1(output1) + # B, _, _ = p1.shape + # f1 = tf.reshape(p1, shape=[B, -1]) + # p2 = self.p2(output2) + # f2 = tf.reshape(p2, shape=[B, -1]) + # p3 = self.p3(output3) + # f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([output1, output2, output3], axis=1) + # dropout = tf.keras.layers.Dropout(0.25)(concat3) + d4 = self.d4(concat3) + d5 = self.d5(d4) + # d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d5) + + # reduce_mean降维计算均值 + a = 50 + beta = 0.5 * math.cos(min(self.epoch * 2 / self.epochs, 1) * math.pi) + 0.5 + MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1) + MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2) + MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3) + Cross_Entropy_loss = tf.reduce_mean( + tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True)) + + print("MSE_loss:", MSE_loss.numpy()) + print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy()) + Accuracy_num = self.get_Accuracy(label=label2, output=output4) + loss = beta * MSE_loss + a * Cross_Entropy_loss + return loss, 
Accuracy_num + + def get_Accuracy(self, output, label): + + predict_label = tf.round(output) + label = tf.cast(label, dtype=tf.float32) + + t = np.array(label - predict_label) + + b = t[t[:] == 0] + + return b.__len__() + + def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + with tf.GradientTape() as tape: + # todo 原本tape只会监控由tf.Variable创建的trainable=True属性 + # tape.watch(self.variables) + L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, pred_5=pred_5) + # 保存一下loss,用于输出 + self.train_loss = L + g = tape.gradient(L, self.variables) + return g, Accuracy_num + + def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None, + pred_4=None, pred_5=None): + g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, pred_5=pred_5) + optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables)) + return self.train_loss, Accuracy_num + + # 暂时只支持batch_size等于1,不然要传z比较麻烦 + def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True, + step_one_model=None): + val_loss = [] + accuracy_num = 0 + output1 = 0 + output2 = 0 + output3 = 0 + z = 1 + size, length, dims = val_data.shape + if batch_size == None: + batch_size = self.batch_size + for epoch in range(0, size - batch_size, batch_size): + each_val_data = val_data[epoch:epoch + batch_size, :, :] + each_val_label1 = val_label1[epoch:epoch + batch_size, :] + each_val_label2 = val_label2[epoch:epoch + batch_size, ] + # each_val_data = tf.expand_dims(each_val_data, axis=0) + # each_val_query = tf.expand_dims(each_val_query, axis=0) + # each_val_label = tf.expand_dims(each_val_label, axis=0) + if not is_first_time: + output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, 
is_first_time=True) + + each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2, + is_first_time=is_first_time, + pred_3=output1, pred_4=output2, pred_5=output3) + accuracy_num += each_accuracy_num + val_loss.append(each_loss) + z += 1 + + val_accuracy = accuracy_num / ((z - 1) * batch_size) + val_total_loss = tf.reduce_mean(val_loss) + return val_total_loss, val_accuracy + + +class RevConv(keras.layers.Layer): + + def __init__(self, kernel_size=3): + # 调用父类__init__()方法 + super(RevConv, self).__init__() + self.kernel_size = kernel_size + + def get_config(self): + # 自定义层里面的属性 + config = ( + { + 'kernel_size': self.kernel_size + } + ) + base_config = super(RevConv, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def build(self, input_shape): + # print(input_shape) + _, _, output_dim = input_shape[0], input_shape[1], input_shape[2] + self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1, + padding='causal', + dilation_rate=4) + + self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal', + dilation_rate=4) + # self.b2 = tf.keras.layers.BatchNormalization() + + # self.b3 = tf.keras.layers.BatchNormalization() + + # out = tf.keras.layers.Add()([b1, b2, b3]) + # out = tf.nn.relu(out) + + def call(self, inputs, **kwargs): + conv1 = self.conv1(inputs) + b1 = tf.keras.layers.BatchNormalization()(conv1) + b1 = tf.nn.leaky_relu(b1) + # b1 = self.b1 + + conv2 = self.conv2(inputs) + b2 = tf.keras.layers.BatchNormalization()(conv2) + b2 = tf.nn.leaky_relu(b2) + + b3 = tf.keras.layers.BatchNormalization()(inputs) + + out = tf.keras.layers.Add()([b1, b2, b3]) + out = tf.nn.relu(out) + + return out + + +class RevConvBlock(keras.layers.Layer): + + def __init__(self, num: int = 3, kernel_size=3): + # 调用父类__init__()方法 + super(RevConvBlock, self).__init__() + self.num = num + self.kernel_size = kernel_size + self.L = [] + for i 
in range(num): + RepVGG = RevConv(kernel_size=kernel_size) + self.L.append(RepVGG) + + def get_config(self): + # 自定义层里面的属性 + config = ( + { + 'kernel_size': self.kernel_size, + 'num': self.num + } + ) + base_config = super(RevConvBlock, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def call(self, inputs, **kwargs): + for i in range(self.num): + inputs = self.L[i](inputs) + return inputs diff --git a/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/__init__.py new file mode 100644 index 0000000..48e28b3 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/__init__.py @@ -0,0 +1,9 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/14 9:40 +@Usage : +@Desc : +''' \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet.py b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet.py new file mode 100644 index 0000000..96626c6 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/RNet.py @@ -0,0 +1,447 @@ +# _*_ coding: UTF-8 _*_ + + +''' +@Author : dingjiawen +@Date : 2022/7/14 9:40 +@Usage : 联合监测模型 +@Desc : RNet:去除掉DCAU +''' + +import tensorflow as tf +import tensorflow.keras as keras +from tensorflow.keras import * +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D +from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling +from condition_monitoring.data_deal import loadData +from model.LossFunction.smooth_L1_Loss import SmoothL1Loss + + +class Joint_Monitoring(keras.Model): + + def __init__(self, conv_filter=20): + # 调用父类__init__()方法 + super(Joint_Monitoring, self).__init__() + # step one + self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5) + self.conv1 
= tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME') + self.upsample1 = tf.keras.layers.UpSampling1D(size=2) + + # self.DACU2 = DynamicChannelAttention() + self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3) + self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME') + self.upsample2 = tf.keras.layers.UpSampling1D(size=2) + + # self.DACU3 = DynamicChannelAttention() + self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3) + self.p1 = DynamicPooling(pool_size=2) + self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME') + + # self.DACU4 = DynamicChannelAttention() + self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3) + self.p2 = DynamicPooling(pool_size=4) + self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME') + + self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3) + self.p3 = DynamicPooling(pool_size=2) + + # step two + # 重现原数据 + self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False) + self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False) + self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False) + self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu) + self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu) + + + # loss + self.train_loss = [] + + def call(self, inputs, training=None, mask=None, is_first_time: bool = True): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs) + RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + 
upsample1 = self.upsample1(conv1) + + # DACU2 = self.DACU2(upsample1) + DACU2 = tf.keras.layers.BatchNormalization()(upsample1) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + # DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(upsample2) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], axis=1) + + # DACU4 = self.DACU4(concat1) + DACU4 = tf.keras.layers.BatchNormalization()(concat1) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = self.RepDCBlock5(concat2) + RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5) + + output1 = [] + output2 = [] + output3 = [] + output4 = [] + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + else: + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + 
# 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # 多尺度动态池化 + # p1 = self.p1(output1) + # B, _, _ = p1.shape + # f1 = tf.reshape(p1, shape=[B, -1]) + # p2 = self.p2(output2) + # f2 = tf.reshape(p2, shape=[B, -1]) + # p3 = self.p3(output3) + # f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([output1, output2, output3], axis=1) + # dropout = tf.keras.layers.Dropout(0.25)(concat3) + d4 = self.d4(concat3) + d5 = self.d5(d4) + # d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d5) + + return output1, output2, output3, output4 + + def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + # step one + RepDCBlock1 = self.RepDCBlock1(inputs_tensor) + RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1) + conv1 = self.conv1(RepDCBlock1) + conv1 = tf.nn.leaky_relu(conv1) + conv1 = tf.keras.layers.BatchNormalization()(conv1) + upsample1 = self.upsample1(conv1) + + # DACU2 = self.DACU2(upsample1) + DACU2 = tf.keras.layers.BatchNormalization()(upsample1) + RepDCBlock2 = self.RepDCBlock2(DACU2) + RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2) + conv2 = self.conv2(RepDCBlock2) + conv2 = tf.nn.leaky_relu(conv2) + conv2 = tf.keras.layers.BatchNormalization()(conv2) + upsample2 = self.upsample2(conv2) + + # DACU3 = self.DACU3(upsample2) + DACU3 = tf.keras.layers.BatchNormalization()(upsample2) + RepDCBlock3 = self.RepDCBlock3(DACU3) + RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3) + conv3 = self.conv3(RepDCBlock3) + conv3 = tf.nn.leaky_relu(conv3) + conv3 = tf.keras.layers.BatchNormalization()(conv3) + + concat1 = tf.concat([conv2, conv3], 
axis=1) + + # DACU4 = self.DACU4(concat1) + DACU4 = tf.keras.layers.BatchNormalization()(concat1) + RepDCBlock4 = self.RepDCBlock4(DACU4) + RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4) + conv4 = self.conv4(RepDCBlock4) + conv4 = tf.nn.leaky_relu(conv4) + conv4 = tf.keras.layers.BatchNormalization()(conv4) + + concat2 = tf.concat([conv1, conv4], axis=1) + + RepDCBlock5 = self.RepDCBlock5(concat2) + RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5) + + if is_first_time: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # reduce_mean降维计算均值 + MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1) + MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2) + MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3) + # MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1)) + # MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2)) + # MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3)) + + print("MSE_loss1:", MSE_loss1.numpy()) + print("MSE_loss2:", MSE_loss2.numpy()) + print("MSE_loss3:", MSE_loss3.numpy()) + loss = MSE_loss1 + MSE_loss2 + MSE_loss3 + Accuracy_num = 0 + + else: + # step two + # 重现原数据 + # 接block3 + GRU1 = self.GRU1(RepDCBlock3) + GRU1 = tf.keras.layers.BatchNormalization()(GRU1) + d1 = self.d1(GRU1) + # tf.nn.softmax + output1 = self.output1(d1) + # 接block4 + GRU2 = self.GRU2(RepDCBlock4) + GRU2 = tf.keras.layers.BatchNormalization()(GRU2) + d2 = self.d2(GRU2) + # tf.nn.softmax + 
output2 = self.output2(d2) + # 接block5 + GRU3 = self.GRU3(RepDCBlock5) + GRU3 = tf.keras.layers.BatchNormalization()(GRU3) + d3 = self.d3(GRU3) + # tf.nn.softmax + output3 = self.output3(d3) + + # 多尺度动态池化 + # p1 = self.p1(output1) + # B, _, _ = p1.shape + # f1 = tf.reshape(p1, shape=[B, -1]) + # p2 = self.p2(output2) + # f2 = tf.reshape(p2, shape=[B, -1]) + # p3 = self.p3(output3) + # f3 = tf.reshape(p3, shape=[B, -1]) + # step three + # 分类器 + concat3 = tf.concat([output1, output2, output3], axis=1) + # dropout = tf.keras.layers.Dropout(0.25)(concat3) + d4 = self.d4(concat3) + d5 = self.d5(d4) + # d4 = tf.keras.layers.BatchNormalization()(d4) + output4 = self.output4(d5) + + # reduce_mean降维计算均值 + MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1) + MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2) + MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3) + Cross_Entropy_loss = tf.reduce_mean( + tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True)) + + print("MSE_loss:", MSE_loss.numpy()) + print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy()) + Accuracy_num = self.get_Accuracy(label=label2, output=output4) + loss = MSE_loss + Cross_Entropy_loss + return loss, Accuracy_num + + def get_Accuracy(self, output, label): + + predict_label = tf.round(output) + label = tf.cast(label, dtype=tf.float32) + + t = np.array(label - predict_label) + + b = t[t[:] == 0] + + return b.__len__() + + def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None, + pred_5=None): + with tf.GradientTape() as tape: + # todo 原本tape只会监控由tf.Variable创建的trainable=True属性 + # tape.watch(self.variables) + L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, pred_5=pred_5) + # 保存一下loss,用于输出 + self.train_loss = L + g = tape.gradient(L, self.variables) + return g, Accuracy_num + + def train(self, input_tensor, label1=None, 
label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None, + pred_4=None, pred_5=None): + g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time, + pred_3=pred_3, + pred_4=pred_4, pred_5=pred_5) + optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables)) + return self.train_loss, Accuracy_num + + # 暂时只支持batch_size等于1,不然要传z比较麻烦 + def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True, + step_one_model=None): + val_loss = [] + accuracy_num = 0 + output1 = 0 + output2 = 0 + output3 = 0 + z = 1 + size, length, dims = val_data.shape + if batch_size == None: + batch_size = self.batch_size + for epoch in range(0, size - batch_size, batch_size): + each_val_data = val_data[epoch:epoch + batch_size, :, :] + each_val_label1 = val_label1[epoch:epoch + batch_size, :] + each_val_label2 = val_label2[epoch:epoch + batch_size, ] + # each_val_data = tf.expand_dims(each_val_data, axis=0) + # each_val_query = tf.expand_dims(each_val_query, axis=0) + # each_val_label = tf.expand_dims(each_val_label, axis=0) + if not is_first_time: + output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True) + + each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2, + is_first_time=is_first_time, + pred_3=output1, pred_4=output2, pred_5=output3) + accuracy_num += each_accuracy_num + val_loss.append(each_loss) + z += 1 + + val_accuracy = accuracy_num / ((z-1) * batch_size) + val_total_loss = tf.reduce_mean(val_loss) + return val_total_loss, val_accuracy + + +class RevConv(keras.layers.Layer): + + def __init__(self, kernel_size=3): + # 调用父类__init__()方法 + super(RevConv, self).__init__() + self.kernel_size = kernel_size + + def get_config(self): + # 自定义层里面的属性 + config = ( + { + 'kernel_size': self.kernel_size + } + ) + base_config = super(RevConv, self).get_config() + return dict(list(base_config.items()) + 
list(config.items())) + + def build(self, input_shape): + # print(input_shape) + _, _, output_dim = input_shape[0], input_shape[1], input_shape[2] + self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1, + padding='causal', + dilation_rate=4) + + self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal', + dilation_rate=4) + # self.b2 = tf.keras.layers.BatchNormalization() + + # self.b3 = tf.keras.layers.BatchNormalization() + + # out = tf.keras.layers.Add()([b1, b2, b3]) + # out = tf.nn.relu(out) + + def call(self, inputs, **kwargs): + conv1 = self.conv1(inputs) + b1 = tf.keras.layers.BatchNormalization()(conv1) + b1 = tf.nn.leaky_relu(b1) + # b1 = self.b1 + + conv2 = self.conv2(inputs) + b2 = tf.keras.layers.BatchNormalization()(conv2) + b2 = tf.nn.leaky_relu(b2) + + b3 = tf.keras.layers.BatchNormalization()(inputs) + + out = tf.keras.layers.Add()([b1, b2, b3]) + out = tf.nn.relu(out) + + return out + + +class RevConvBlock(keras.layers.Layer): + + def __init__(self, num: int = 3, kernel_size=3): + # 调用父类__init__()方法 + super(RevConvBlock, self).__init__() + self.num = num + self.kernel_size = kernel_size + self.L = [] + for i in range(num): + RepVGG = RevConv(kernel_size=kernel_size) + self.L.append(RepVGG) + + def get_config(self): + # 自定义层里面的属性 + config = ( + { + 'kernel_size': self.kernel_size, + 'num': self.num + } + ) + base_config = super(RevConvBlock, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def call(self, inputs, **kwargs): + for i in range(self.num): + inputs = self.L[i](inputs) + return inputs diff --git a/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/__init__.py new file mode 100644 index 0000000..00271b1 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/Joint_Monitoring/compare/__init__.py @@ -0,0 +1,10 @@ +# -*- 
coding: utf-8 -*- + +# coding: utf-8 + +''' +@Author : dingjiawen +@Date : 2022/10/11 20:30 +@Usage : +@Desc : +''' \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/LRU/lru.py b/TensorFlow_eaxmple/Model_train_test/model/LRU/lru.py index f9045f4..735e383 100644 --- a/TensorFlow_eaxmple/Model_train_test/model/LRU/lru.py +++ b/TensorFlow_eaxmple/Model_train_test/model/LRU/lru.py @@ -2,7 +2,14 @@ # 线性循环单元(Linear Recurrent Unit) # tensorflow 1.15 + bert4keras 0.11.4 测试通过 -from bert4keras.layers import * +# from bert4keras.layers import * +import numpy as np +import tensorflow.keras.backend as K +import tensorflow as tf +import tensorflow.keras.layers as layers +import tensorflow.keras.activations as activations +import tensorflow.keras.initializers as initializers +from tensorflow.keras.layers import Dense,Layer class LRU(Layer): @@ -26,7 +33,7 @@ class LRU(Layer): self.unroll = unroll self.kernel_initializer = initializers.get(kernel_initializer) - @integerize_shape + def build(self, input_shape): super(LRU, self).build(input_shape) hidden_size = input_shape[-1] @@ -57,7 +64,7 @@ class LRU(Layer): name='params_log', shape=(3, self.units), initializer=initializer ) - @recompute_grad + def call(self, inputs, mask=None): u = self.i_dense(inputs) params = K.exp(self.params_log) @@ -70,7 +77,7 @@ class LRU(Layer): else: L_in = K.shape(u)[1] log2_L = K.log(K.cast(L_in, K.floatx())) / K.log(2.) - log2_L = K.cast(tf.ceil(log2_L), 'int32') + log2_L = K.cast(tf.math.ceil(log2_L), 'int32') u = tf.complex(u[..., ::2], u[..., 1::2]) u = tf.pad(u, [[0, 0], [0, 2**log2_L - K.shape(u)[1]], [0, 0]]) @@ -101,7 +108,7 @@ class LRU(Layer): _, x = tf.while_loop(lambda i, x: i <= log2_L, lru, [1, u]) x = x[:, :L_in] * tf.complex(gamma, 0.) 
# -*- encoding:utf-8 -*-

'''
@Author : dingjiawen
@Date   : 2023/6/14 13:49
@Usage  :
@Desc   : Standard LSTM implemented from scratch as a Keras layer
          (gates realised with explicit weight matrices).
'''

import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Conv2D, Conv1D

from tensorflow.keras import *
import tensorflow.keras.layers as layers


class LSTMLayer(layers.Layer):
    # Two weight initializers kept around for convenience.
    k_ini = initializers.GlorotUniform()
    b_ini = initializers.Zeros()

    def __init__(self, units=30, return_sequences: bool = False, **kwargs):
        # FIX: forward **kwargs (name, dtype, trainable, ...) to the base
        # Layer so layer naming works and get_config()/from_config()
        # round-trips; previously kwargs were silently dropped.
        super(LSTMLayer, self).__init__(**kwargs)
        self.units = units
        self.return_sequences = return_sequences

    def get_params(self, num_inputs, num_outputs):
        """Create the four gate weight matrices and bias vectors as trainable weights."""
        def _one(shape, name):
            return self.add_weight(shape=shape, name=name,
                                   initializer=tf.random_normal_initializer)

        def _three(name1, name2):
            # Each gate consumes [x_t ; h_{t-1}], hence the
            # (num_inputs + num_outputs) input dimension.
            return (_one(shape=(num_inputs + num_outputs, num_outputs), name=name1),
                    self.add_weight(shape=(num_outputs,), name=name2,
                                    initializer=tf.zeros_initializer))

        W_i, b_i = _three("W_i", "b_i")  # input gate
        W_f, b_f = _three("W_f", "b_f")  # forget gate
        W_o, b_o = _three("W_o", "b_o")  # output gate
        W_c, b_c = _three("W_c", "b_c")  # candidate cell state
        return W_i, b_i, W_f, b_f, W_o, b_o, W_c, b_c

    def get_config(self):
        # Constructor arguments, serialised for model save/load.
        config = {
            'units': self.units,
            'return_sequences': self.return_sequences,
        }
        base_config = super(LSTMLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        num_inputs, num_outputs = input_shape[-1], self.units
        (self.W_i, self.b_i, self.W_f, self.b_f,
         self.W_o, self.b_o, self.W_c, self.b_c) = self.get_params(
            num_inputs=num_inputs, num_outputs=num_outputs)

    def call(self, inputs, **kwargs):
        # inputs: (batch, timesteps, features) -- timesteps must be static
        # because the recurrence is unrolled with a Python loop.
        _, hiddens, _ = inputs.shape

        for hidden in range(hiddens):
            new_input = tf.expand_dims(inputs[:, hidden, :], axis=1)

            if hidden != 0:
                # Every gate sees [x_t ; h_{t-1}].
                new_input = tf.concat([new_input, ht_1], axis=-1)
            else:
                # First step: zero-pad in place of the (zero) initial state.
                new_input = tf.pad(new_input, [[0, 0], [0, 0], [0, self.units]])

            ft = tf.nn.sigmoid(tf.matmul(new_input, self.W_f) + self.b_f)
            it = tf.nn.sigmoid(tf.matmul(new_input, self.W_i) + self.b_i)
            ct_ = tf.nn.tanh(tf.matmul(new_input, self.W_c) + self.b_c)
            ot = tf.nn.sigmoid(tf.matmul(new_input, self.W_o) + self.b_o)

            if hidden != 0:
                ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_))
            else:
                # c_{-1} is zero, so the forget-gate term vanishes.
                ct = tf.multiply(it, ct_)
            ht = tf.multiply(tf.nn.tanh(ct), ot)

            if self.return_sequences:
                output = ht if hidden == 0 else tf.concat([output, ht], axis=1)
            elif hidden == hiddens - 1:
                # Only the last hidden state is returned, with the time
                # axis squeezed away.
                output = tf.squeeze(ht, axis=1)

            ht_1 = ht
            ct_1 = ct

        return output


if __name__ == '__main__':
    pass
# -*- encoding:utf-8 -*-

'''
@Author : dingjiawen
@Date   : 2023/6/14 13:49
@Usage  :
@Desc   : Standard LSTM, gates realised with Dense layers for speed.
'''

import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Conv2D, Conv1D

from tensorflow.keras import *
import tensorflow.keras.layers as layers


class LSTMLayer(layers.Layer):
    # Weight initializers kept for convenience.
    k_ini = initializers.GlorotUniform()
    b_ini = initializers.Zeros()

    def __init__(self, units=30, return_sequences: bool = False, **kwargs):
        # FIX: forward **kwargs (name, dtype, trainable, ...) to the base
        # Layer so serialization via get_config()/from_config() round-trips;
        # previously kwargs were silently dropped.
        super(LSTMLayer, self).__init__(**kwargs)
        self.units = units
        self.return_sequences = return_sequences

    def get_params(self, num_outputs):
        """Create one Dense projection per LSTM gate."""
        def _gate():
            return Dense(num_outputs)

        W_i = _gate()  # input gate
        W_f = _gate()  # forget gate
        W_o = _gate()  # output gate
        W_c = _gate()  # candidate cell state
        return W_i, W_f, W_o, W_c

    def get_config(self):
        # Constructor arguments, serialised for model save/load.
        config = {
            'units': self.units,
            'return_sequences': self.return_sequences,
        }
        base_config = super(LSTMLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        num_inputs, num_outputs = input_shape[-1], self.units
        self.W_i, self.W_f, self.W_o, self.W_c = self.get_params(num_outputs=num_outputs)

    def call(self, inputs, **kwargs):
        # inputs: (batch, timesteps, features) -- timesteps must be static
        # because the recurrence is unrolled with a Python loop.
        _, hiddens, _ = inputs.shape

        for hidden in range(hiddens):
            new_input = tf.expand_dims(inputs[:, hidden, :], axis=1)

            if hidden != 0:
                # Every gate sees [x_t ; h_{t-1}].
                new_input = tf.concat([new_input, ht_1], axis=-1)
            else:
                # First step: zero padding stands in for the zero initial state.
                new_input = tf.pad(new_input, [[0, 0], [0, 0], [0, self.units]])

            it = tf.nn.sigmoid(self.W_i(new_input))
            ft = tf.nn.sigmoid(self.W_f(new_input))
            ct_ = tf.nn.tanh(self.W_c(new_input))
            ot = tf.nn.sigmoid(self.W_o(new_input))

            if hidden != 0:
                ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_))
            else:
                # c_{-1} == 0, so the forget-gate contribution vanishes.
                ct = tf.multiply(it, ct_)
            ht = tf.multiply(tf.nn.tanh(ct), ot)

            if self.return_sequences:
                output = ht if hidden == 0 else tf.concat([output, ht], axis=1)
            elif hidden == hiddens - 1:
                # Return only the final hidden state, time axis removed.
                output = tf.squeeze(ht, axis=1)

            ht_1 = ht
            ct_1 = ct

        return output


if __name__ == '__main__':
    pass
Dense(1)(new_input) + Wc = Dense(1)(new_input) + Wo = Dense(1)(new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=1) + ht_1 = ht + ct_1 = ct + + if i == 0: + sum = output + else: + sum = tf.concat([sum, output], axis=0) + + output = tf.reshape(sum, [batch_size, filter_num]) + # output=tf.expand_dims(output,axis=0) + + print(output.shape) + return output + + def ConvLSTM(self): + input = self.input + batch_size = self.batch_size + + epoch, seq, filter_num = input.shape + print(seq, filter_num) + + ct_1 = tf.zeros(shape=[1, 1]) + ht_1 = tf.zeros(shape=[1, 1]) + + for i in range(batch_size): + output = [] + + for batch in range(filter_num): + + new_input = input[0, :, batch] + new_input = tf.expand_dims(new_input, axis=0) + + new_input = tf.transpose(new_input, [1, 0]) + new_input = tf.expand_dims(new_input, axis=0) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + Wi = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + Wf = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + Wc = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + Wo = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=-1) + ht_1 = ht + ct_1 = ct + + if i == 0: + sum = output + else: + sum = tf.concat([sum, output], axis=0) + + output = tf.reshape(sum, [batch_size, filter_num]) + # output=tf.expand_dims(output,axis=0) + + print(output.shape) + return output + + def SA_LSTM(self, query): + + self.query = query + + input = self.input + batch_size = 
self.batch_size + + epoch, seq, filter_num = input.shape + print(seq, filter_num) + + ct_1 = tf.zeros(shape=[1, 1]) + ht_1 = tf.zeros(shape=[1, 1]) + + for i in range(batch_size): + output = [] + + for batch in range(filter_num): + + new_input = input[0, :, batch] + new_input = tf.expand_dims(new_input, axis=0) + + # new_input = tf.transpose(new_input, [1, 0]) + # new_input = tf.expand_dims(new_input, axis=0) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + # new_input.shape=(1,50) + # self_attention模块 + temp = tf.expand_dims(query[i][batch], axis=0) + query_cell=tf.expand_dims(temp,axis=1) + new_input1 = tf.concat([new_input, query_cell], axis=1) + (_, new_input_dims) = new_input1.shape + temp1 = Dense(new_input_dims)(new_input1) + temp1=temp1[:,:-1] + + temp2 = tf.nn.tanh(temp1) + + Si = Dense(new_input_dims-1)(temp2) + ai = tf.nn.softmax(Si) + ones=tf.ones(shape=ai.shape) + + new_input = tf.multiply((ai + ones), new_input) + + Wi = Dense(1)(new_input) + Wf = Dense(1)(new_input) + Wc = Dense(1)(new_input) + Wo = Dense(1)(new_input) + + # Wi = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + # Wf = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + # Wc = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + # Wo = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=-1) + ht_1 = ht + ct_1 = ct + + if i == 0: + sum = output + else: + sum = tf.concat([sum, output], axis=0) + + output = tf.reshape(sum, [batch_size, filter_num]) + # output=tf.expand_dims(output,axis=0) + + print(output.shape) + return output + + def SA_ConvLSTM(self, query): + + self.query = query + + input = self.input + batch_size = self.batch_size + + epoch, seq, filter_num = input.shape + 
print(seq, filter_num) + + ct_1 = tf.zeros(shape=[1, 1]) + ht_1 = tf.zeros(shape=[1, 1]) + + for i in range(batch_size): + output = [] + + for batch in range(filter_num): + + new_input = input[0, :, batch] + new_input = tf.expand_dims(new_input, axis=0) + + # new_input = tf.transpose(new_input, [1, 0]) + # new_input = tf.expand_dims(new_input, axis=0) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + # new_input.shape=(1,50) + # self_attention模块 + temp = tf.expand_dims(query[i][batch], axis=0) + query_cell=tf.expand_dims(temp,axis=1) + new_input1 = tf.concat([new_input, query_cell], axis=1) + (_, new_input_dims) = new_input1.shape + temp1 = Dense(new_input_dims)(new_input1) + temp1=temp1[:,:-1] + + temp2 = tf.nn.tanh(temp1) + + Si = Dense(new_input_dims-1)(temp2) + ai = tf.nn.softmax(Si) + ones=tf.ones(shape=ai.shape) + + new_input = tf.multiply((ai + ones), new_input) + + + + Wi = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + Wf = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + Wc = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + Wo = Conv1D(1, kernel_size=3, padding='SAME')(new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=-1) + ht_1 = ht + ct_1 = ct + + if i == 0: + sum = output + else: + sum = tf.concat([sum, output], axis=0) + + output = tf.reshape(sum, [batch_size, filter_num]) + # output=tf.expand_dims(output,axis=0) + + print(output.shape) + return output + +# if __name__ == '__main__': +# input=tf.random.truncated_normal(shape=[5,10]) +# LSTM(input=input).getlayer() diff --git a/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self1.py b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self1.py new file mode 100644 index 0000000..255b6ae --- 
/dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self1.py @@ -0,0 +1,179 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from tensorflow.keras.layers import Dense, Conv2D, Conv1D + + +class LSTM_realize(): + + def __init__(self, input=np.empty(shape=[100, 1]), units=30, batch_size=10, if_SA=False): + self.input = input + self.units = units + self.batch_size = batch_size + self.if_SA = if_SA + + def getLayer(self, layer='LSTM', query=None): + + if layer == 'LSTM': + layer = self.LSTM_layer() + return layer + + elif layer == 'ConvLSTM': + layer = self.convLSTM() + return layer + + elif layer == 'SA_LSTM': + layer = self.SA_LSTM(query) + return layer + + elif layer == 'SA_ConvLSTM': + self.if_SA = True + layer = self.SA_ConvLSTM(query) + return layer + + else: + raise ValueError("算法尚未实现") + + def LSTM_layer(self): + input = self.input + batch_size = self.batch_size + units = self.units + + epoch, filter_num, dims = input.shape + print(filter_num, dims) + + ct_1 = tf.zeros(shape=[1, units]) + ht_1 = tf.zeros(shape=[1, units]) + + for i in range(batch_size): + + + output = [] + + for batch in range(filter_num): + + new_input = input[0, batch, :] + new_input = tf.expand_dims(new_input, axis=0) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + Wi = Dense(units)(new_input) + Wf = Dense(units)(new_input) + Wc = Dense(units)(new_input) + Wo = Dense(units)(new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=-1) + ht_1 = ht + ct_1 = ct + + if i == 0: + sum = output + # sum = tf.expand_dims(sum,axis=0) + else: + # output = tf.expand_dims(output, axis=0) + sum = tf.concat([sum, output], axis=0) + + # + output = tf.reshape(sum, 
[batch_size, filter_num]) + # output=tf.expand_dims(output,axis=0) + + print(output.shape) + return output + + def convLSTM(self): + return None + + def SA_ConvLSTM(self, query): + + self.query = query + + input = self.input + batch_size = self.batch_size + units = self.units + + epoch, filter_num, dims = input.shape + print(filter_num, dims) + + ct_1 = tf.zeros(shape=[1, units]) + ht_1 = tf.zeros(shape=[1, units]) + + for i in range(batch_size): + output = [] + + for batch in range(filter_num): + + new_input = input[0, batch, :] + new_input = tf.expand_dims(new_input, axis=0) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + # new_input.shape=(1,50) + # self_attention模块 + temp = tf.expand_dims(query[i][batch], axis=0) + query_cell = tf.expand_dims(temp, axis=1) + new_input1 = tf.concat([new_input, query_cell], axis=1) + + (_, new_input_dims) = new_input1.shape + temp1 = Dense(new_input_dims)(new_input1) + temp1 = temp1[:, :-1] + + temp2 = tf.nn.tanh(temp1) + + Si = Dense(new_input_dims - 1)(temp2) + ai = tf.nn.softmax(Si) + ones = tf.ones(shape=ai.shape) + + new_input = tf.multiply((ai + ones), new_input) + new_input = tf.expand_dims(new_input, axis=1) + + Wi = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + Wf = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + Wc = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + Wo = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + + Wi = tf.keras.layers.Flatten()(Wi) + Wf = tf.keras.layers.Flatten()(Wf) + Wc = tf.keras.layers.Flatten()(Wc) + Wo = tf.keras.layers.Flatten()(Wo) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=-1) + ht_1 = ht + ct_1 = ct + + if i == 0: + sum = output + else: + sum = tf.concat([sum, output], axis=0) + + output = 
tf.reshape(sum, [batch_size, filter_num]) + # output=tf.expand_dims(output,axis=0) + + print(output.shape) + return output + +# if __name__ == '__main__': +# input=tf.random.truncated_normal(shape=[5,10]) +# LSTM(input=input).getlayer() diff --git a/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self2.py b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self2.py new file mode 100644 index 0000000..a53580d --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self2.py @@ -0,0 +1,162 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from tensorflow.keras.layers import Dense, Conv2D, Conv1D + +tf.keras.backend.clear_session() +import tensorflow.keras as keras +import tensorflow.keras.layers as layers + + +''' +支持各种dim维度的数据进去,但是SA模块的查询暂时有问题 +需要知道batch是第几次 +''' +class LSTM_realize(layers.Layer): + + def __init__(self, input, units=30, batch_size=10, if_SA=False): + super(LSTM_realize, self).__init__() + self.input = input + self.units = units + self.batch_size = batch_size + self.if_SA = if_SA + + def getLayer(self, layer='LSTM', query=None): + + if layer == 'LSTM': + layer = self.LSTM_layer() + return layer + + elif layer == 'ConvLSTM': + layer = self.convLSTM() + return layer + + elif layer == 'SA_LSTM': + layer = self.SA_LSTM(query) + return layer + + elif layer == 'SA_ConvLSTM': + self.if_SA = True + layer = self.SA_ConvLSTM(query) + return layer + + else: + raise ValueError("算法尚未实现") + + def LSTM_layer(self): + input = self.input + batch_size = self.batch_size + units = self.units + + epoch, filter_num, dims = input.shape + print(filter_num, dims) + + ct_1 = tf.zeros(shape=[batch_size, 1, units]) + ht_1 = tf.zeros(shape=[batch_size, 1, units]) + + for batch in range(filter_num): + new_input = input[:, batch, :] + new_input = tf.expand_dims(new_input, axis=1) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + Wi = 
Dense(units)(new_input) + Wf = Dense(units)(new_input) + Wc = Dense(units)(new_input) + Wo = Dense(units)(new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=1) + ht_1 = ht + ct_1 = ct + + output = tf.reshape(output, [batch_size, filter_num, units]) + + print(output.shape) + return output + + def convLSTM(self): + return None + + def SA_ConvLSTM(self, query): + + self.query = query + print(query.shape) + + input = self.input + batch_size = self.batch_size + units = self.units + + epoch, filter_num, dims = input.shape + print(filter_num, dims) + + ct_1 = tf.zeros(shape=[batch_size, 1, units]) + ht_1 = tf.zeros(shape=[batch_size, 1, units]) + + for batch in range(filter_num): + + new_input = input[:, batch, :] + new_input = tf.expand_dims(new_input, axis=1) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + # new_input.shape=(1,50,1) + # self_attention模块 + # query_cell = query[batch * batch_size:(batch + 1) * batch_size, batch, :] + query_cell = tf.expand_dims(query[batch * batch_size:(batch + 1) * batch_size, batch, :], axis=-1) + # query_cell = tf.expand_dims(temp, axis=1) + new_input1 = tf.concat([new_input, query_cell], axis=-1) + + (_, _, new_input_dims) = new_input1.shape + temp1 = Dense(new_input_dims)(new_input1) + temp1 = temp1[:, :, :-1] + + temp2 = tf.nn.tanh(temp1) + + Si = Dense(new_input_dims - 1)(temp2) + ai = tf.nn.softmax(Si) + ones = tf.ones(shape=ai.shape) + + new_input = tf.multiply((ai + ones), new_input) + + Wi = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + Wf = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + Wc = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + Wo = Conv1D(units, kernel_size=3, padding='SAME')(new_input) + + ft = tf.nn.sigmoid(Wf) + it = 
tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=1) + ht_1 = ht + ct_1 = ct + + # output = tf.reshape(sum, [batch_size , filter_num, units]) + # output=tf.expand_dims(output,axis=0) + + print(output.shape) + + return output + +# if __name__ == '__main__': +# input=tf.random.truncated_normal(shape=[5,10]) +# LSTM(input=input).getlayer() diff --git a/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self3.py b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self3.py new file mode 100644 index 0000000..3ffb6e3 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self3.py @@ -0,0 +1,235 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from tensorflow.keras.layers import Dense, Conv2D, Conv1D + +tf.keras.backend.clear_session() +import tensorflow.keras as keras +import tensorflow.keras.layers as layers + +''' +增加了build和call方法,初始化参数z,做到可以知道当前的batch +也可以去查询SA模块 +init中带参数的层必须重写get_config +存在问题:首先是模型保存问题,添加self.z知道当前的训练次数以后,这个参数似乎无法保存,保存之后无法提取的问题 +第二点是 由于在训练的时候使用的model.fit,训练过程封装的太好了,以至于这个类似乎只在初始化和训练第一次时加载一次,self.z不会随着训练次数的增加而增加 +TODO 第一个问题已经解决 +''' + + +class LSTM_realize(layers.Layer): + + def __init__(self, units=30, batch_size=10, if_SA=False, if_Conv=False,query=None, **kwargs): + super(LSTM_realize, self).__init__() + self.units = units + self.batch_size = batch_size + self.if_SA = if_SA + self.if_Conv = if_Conv + self.query=query + + def get_config(self): + # 自定义层里面的属性 + config = ( + { + 'units': self.units, + 'batch_size': self.batch_size, + 'if_SA': self.if_SA, + 'if_Conv': self.if_Conv, + 'query': self.query + } + ) + base_config = super(LSTM_realize, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + def 
build(self, input_shape): + # 初始化可训练参数 + self.Wi = [] + self.Wf = [] + self.Wc = [] + self.Wo = [] + self.Si = [] + self.temp1 = [] + for i in range(input_shape[1]): + if not self.if_Conv: + Wi = Dense(self.units) + Wf = Dense(self.units) + Wc = Dense(self.units) + Wo = Dense(self.units) + else: + Wi = Conv1D(self.units, kernel_size=3, padding='SAME') + Wf = Conv1D(self.units, kernel_size=3, padding='SAME') + Wc = Conv1D(self.units, kernel_size=3, padding='SAME') + Wo = Conv1D(self.units, kernel_size=3, padding='SAME') + + if self.if_SA: + if i == 0: + Si = Dense(input_shape[-1]) + temp1 = Dense(input_shape[-1] + 1) + else: + Si = Dense(self.units + input_shape[-1]) + temp1 = Dense(self.units + input_shape[-1] + 1) + + self.Si.append(Si) + self.temp1.append(temp1) + + self.Wi.append(Wi) + self.Wf.append(Wf) + self.Wc.append(Wc) + self.Wo.append(Wo) + + self.z = 0 + + def call(self, inputs, layer='LSTM', **kwargs): + self.inputs = inputs + + return self.getLayer(layer=layer, query=self.query) + + def getLayer(self, layer='LSTM', query=None): + + if layer == 'LSTM': + self.if_SA = False + self.if_Conv = False + layer = self.LSTM_layer() + return layer + + elif layer == 'ConvLSTM': + self.if_SA = False + self.if_Conv = True + layer = self.convLSTM() + return layer + + elif layer == 'SA_LSTM': + self.if_SA = True + self.if_Conv = False + layer = self.SA_LSTM(query) + return layer + + elif layer == 'SA_ConvLSTM': + self.if_SA = True + self.if_Conv = True + layer = self.SA_ConvLSTM(query) + return layer + + else: + raise ValueError("算法尚未实现") + + def LSTM_layer(self): + input = self.inputs + batch_size = self.batch_size + units = self.units + + epoch, filter_num, dims = input.shape + print(filter_num, dims) + + for batch in range(filter_num): + new_input = input[:, batch, :] + new_input = tf.expand_dims(new_input, axis=1) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + Wi = self.Wi[batch](new_input) + Wf = self.Wf[batch](new_input) + Wc = 
self.Wc[batch](new_input) + Wo = self.Wo[batch](new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + if batch != 0: + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + else: + ct = tf.add(ft, tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=1) + ht_1 = ht + ct_1 = ct + + # output = tf.reshape(output, [-1, filter_num, units]) + + print(output.shape) + return output + + def convLSTM(self): + return None + + def SA_ConvLSTM(self, query): + + # TODO 解决模型保存时,该数组会被序列化,然后返序列化以后不再是np或者是tf的问题 + query = tf.cast(query, dtype=tf.float32) + # print(query.shape) + + input = self.inputs + batch_size = self.batch_size + units = self.units + + epoch, filter_num, dims = input.shape + # print(filter_num, dims) + + for batch in range(filter_num): + + new_input = input[:, batch, :] + new_input = tf.expand_dims(new_input, axis=1) + + if batch != 0: + new_input = tf.concat([new_input, ht_1], axis=-1) + + # new_input.shape=(1,50,1) + # self_attention模块 + # query_cell = query[batch * batch_size:(batch + 1) * batch_size, batch, :] + query_cell = tf.expand_dims(query[self.z * batch_size:(self.z + 1) * batch_size, batch, :], axis=-1) + # query_cell = tf.expand_dims(temp, axis=1) + new_input1 = tf.concat([new_input, query_cell], axis=-1) + + (_, _, new_input_dims) = new_input1.shape + temp1 = self.temp1[batch](new_input1) + temp1 = temp1[:, :, :-1] + + temp2 = tf.nn.tanh(temp1) + + Si = self.Si[batch](temp2) + ai = tf.nn.softmax(Si) + ones = tf.ones(shape=ai.shape) + + new_input = tf.multiply((ai + ones), new_input) + + Wi = self.Wi[batch](new_input) + Wf = self.Wf[batch](new_input) + Wc = self.Wc[batch](new_input) + Wo = self.Wo[batch](new_input) + + ft = tf.nn.sigmoid(Wf) + it = tf.nn.sigmoid(Wi) + ct_ = tf.nn.tanh(Wc) + ot = tf.nn.sigmoid(Wo) + + if batch != 0: + ct = tf.add(tf.multiply(ft, ct_1), tf.multiply(it, ct_)) + else: + ct 
= tf.add(ft, tf.multiply(it, ct_)) + ht = tf.multiply(tf.nn.tanh(ct), ot) + if batch == 0: + output = ht + else: + output = tf.concat([output, ht], axis=1) + ht_1 = ht + ct_1 = ct + + # output = tf.reshape(sum, [batch_size , filter_num, units]) + # output=tf.expand_dims(output,axis=0) + + # print(output.shape) + print("z=", self.z) + self.z += 1 + + return output + +# if __name__ == '__main__': +# input=tf.random.truncated_normal(shape=[5,10]) +# LSTM(input=input).getlayer() diff --git a/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self4.py b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self4.py new file mode 100644 index 0000000..1ccbae5 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/LSTM/before/LSTM_realize_self4.py @@ -0,0 +1,393 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from tensorflow.keras.layers import Dense, Conv2D, Conv1D + +# tf.keras.backend.clear_session() +from tensorflow.keras import * +import tensorflow.keras.layers as layers + +#TODO 权重非共享式LSTM +''' +利用函数式编程的方式解决self3存在的无法知道训练了多少次的问题 +''' + + +class PredictModel(Model): + def __init__(self, filter_num, dims, batch_size, query_label): + # 调用父类__init__()方法 + super(PredictModel, self).__init__() + self.filter_num = filter_num + self.dims = dims + self.batch_size = batch_size + self.query = query_label + self.train_loss = None + self.val_loss = [] + # self.LSTM_object = LSTM_realize(units=50, batch_size=batch_size, if_SA=False, if_Conv=False) + self.input_model_shape = tf.keras.Input(shape=[self.filter_num, self.dims]) + self.LSTM_object = LSTM_realize(units=20, batch_size=self.batch_size, if_SA=True, if_Conv=True, + query=self.query) + # self.drop1 = tf.keras.layers.Dropout(0.2) + self.bn = tf.keras.layers.BatchNormalization() + self.d1 = tf.keras.layers.Dense(10) + self.drop2 = tf.keras.layers.Dropout(0.2) + self.d2 = tf.keras.layers.Dense(1, name='output') + + # def build(self, 
input_shape): + # pass + + # 将动态图转换为静态图 + # 在静态图模型中,输入数据的数据维度不对、数据类型不对、数据名称不对都会报错 + # @tf.function(input_signature=[tf.TensorSpec([None, 30,50], tf.float32, name='digits')]) + # @tf.function + def call(self, inputs, training=None, mask=None, z=0, label=None, query=None, batch_size=None): + if query == None: + query = self.query + if batch_size == None: + batch_size = self.batch_size + input = tf.cast(inputs, tf.float32) + LSTM = self.LSTM_object(inputs=input, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size) + # LSTM = self.LSTM_object(inputs=input, layer='LSTM') + # drop1 = self.drop1(LSTM) + # bn = self.bn(drop1) + bn = self.bn(LSTM) + d1 = self.d1(bn) + drop2 = self.drop2(d1) + output = self.d2(drop2) + # model = tf.keras.Model(inputs=input, outputs=output) + # return model + + return output + + def get_loss(self, inputs_tensor, label, query=None, batch_size=None, z=0): + if query == None: + query = self.query + if batch_size == None: + batch_size = self.batch_size + # inputs = self.input_model_shape(inputs_tensor) + input = tf.cast(inputs_tensor, tf.float32) + LSTM = self.LSTM_object(inputs=input, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size) + # LSTM = self.LSTM_object(inputs=input, layer='LSTM') + # drop1 = self.drop1(LSTM) + # bn = self.bn(drop1) + bn = self.bn(LSTM) + d1 = self.d1(bn) + drop2 = self.drop2(d1) + output = self.d2(drop2) + # reduce_mean降维计算均值 + loss = tf.reduce_mean(tf.keras.losses.mse(y_true=label, y_pred=output)) + return loss + + def get_selfLoss(self, inputs_tensor, label, query=None, batch_size=None, z=0): + if query == None: + query = self.query + if batch_size == None: + batch_size = self.batch_size + # inputs = self.input_model_shape(inputs_tensor) + input = tf.cast(inputs_tensor, tf.float32) + LSTM = self.LSTM_object(inputs=input, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size) + batch, filer_num, dims = LSTM.shape + aloss = 0 + bloss = 0 + for i in range(filer_num - 1): + aloss += 
tf.abs(LSTM[:, i + 1, :] - LSTM[:, i, :]) + for i in range(filer_num-3): + bloss += tf.abs(LSTM[:, i + 3, :] - LSTM[:, i+2, :]-(LSTM[:, i + 1, :] - LSTM[:, i, :])) + # LSTM = self.LSTM_object(inputs=input, layer='LSTM') + # drop1 = self.drop1(LSTM) + # bn = self.bn(drop1) + aloss = tf.reduce_mean(aloss) + bloss = tf.reduce_mean(bloss) + desired_aloss = 0.05 + desired_bloss = 0.1 + # aloss的权重惩罚项 + aalpha = 0.1 + abeita = 0.01 + # bloss的权重惩罚项 + bbeita = 0.01 + bn = self.bn(LSTM) + d1 = self.d1(bn) + drop2 = self.drop2(d1) + output = self.d2(drop2) + # reduce_mean降维计算均值 + loss = tf.reduce_mean(tf.keras.losses.mse(y_true=label, y_pred=output)) + # total_loss = loss+abeita*(desired_aloss*tf.math.log(desired_aloss/aloss)+(1-desired_aloss)*tf.math.log((1-desired_aloss)/(1-aloss)))+bbeita*(desired_bloss*tf.math.log(desired_bloss/bloss)+(1-desired_bloss)*tf.math.log((1-desired_bloss)/(1-bloss))) + total_loss = loss +abeita*aloss+bbeita*bloss + return total_loss + + def get_grad(self, input_tensor, label, query=None, z=0): + with tf.GradientTape() as tape: + # todo 原本tape只会监控由tf.Variable创建的trainable=True属性 + tape.watch(self.variables) + L = self.get_selfLoss(input_tensor, label=label, query=query, z=z) + # 保存一下loss,用于输出 + self.train_loss = L + g = tape.gradient(L, self.variables) + return g + + def train(self, input_tensor, label, query=None, learning_rate=1e-3, z=0): + g = self.get_grad(input_tensor, label=label, query=query, z=z) + optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables)) + return self.train_loss + + # 暂时只支持batch_size等于1,不然要传z比较麻烦 + def get_val_loss(self, val_data, val_label, val_query, batch_size=1, z=0): + self.val_loss = [] + size, filernums, dims = val_data.shape + if batch_size == None: + batch_size = self.batch_size + for epoch in range(size): + each_val_data = val_data[epoch, :, :] + each_val_query = val_query[epoch, :, :] + each_val_label = val_label[epoch, :] + each_val_data = tf.expand_dims(each_val_data, axis=0) + each_val_query = 
class LSTM_realize(layers.Layer):
    """Weight-NON-shared LSTM: every timestep owns its own gate layers.

    `layer=` in call() selects the recurrence flavour:
      'LSTM'         dense gates, no attention
      'ConvLSTM'     Conv1D gates (not implemented yet -- returns None)
      'SA_LSTM'      not implemented in this class (see BUGFIX in getLayer)
      'SA_ConvLSTM'  Conv1D gates + query-driven additive self-attention
      'SA_ConvLSTM1' kqv self-attention variant (stub)
    """

    def __init__(self, units=30, batch_size=10, if_SA=False, if_Conv=False, query=None, **kwargs):
        super(LSTM_realize, self).__init__()
        self.units = units
        self.batch_size = batch_size
        self.if_SA = if_SA
        self.if_Conv = if_Conv
        self.query = query

    def get_config(self):
        """Serialise the custom constructor arguments for model saving."""
        config = {
            'units': self.units,
            'batch_size': self.batch_size,
            'if_SA': self.if_SA,
            'if_Conv': self.if_Conv,
            'query': self.query,
        }
        base_config = super(LSTM_realize, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        # One gate quadruple (input/forget/cell/output) PER timestep:
        # weights are deliberately NOT shared across time.
        self.Wi, self.Wf, self.Wc, self.Wo = [], [], [], []
        self.Si, self.temp1 = [], []
        for step in range(input_shape[1]):
            if not self.if_Conv:
                Wi = Dense(self.units)
                Wf = Dense(self.units)
                Wc = Dense(self.units)
                Wo = Dense(self.units)
            else:
                Wi = Conv1D(self.units, kernel_size=3, padding='SAME')
                Wf = Conv1D(self.units, kernel_size=3, padding='SAME')
                Wc = Conv1D(self.units, kernel_size=3, padding='SAME')
                Wo = Conv1D(self.units, kernel_size=3, padding='SAME')

            if self.if_SA:
                # step 0 has no previous hidden state concatenated, so its
                # attention layers are narrower than the later ones
                if step == 0:
                    self.Si.append(Dense(input_shape[-1]))
                    self.temp1.append(Dense(input_shape[-1] + 1))
                else:
                    self.Si.append(Dense(self.units + input_shape[-1]))
                    self.temp1.append(Dense(self.units + input_shape[-1] + 1))

            self.Wi.append(Wi)
            self.Wf.append(Wf)
            self.Wc.append(Wc)
            self.Wo.append(Wo)

    def call(self, inputs, layer='LSTM', z=0, query=None, batch_size=None, **kwargs):
        # query/batch_size defaulting is handled by the caller (PredictModel)
        self.inputs = inputs
        return self.getLayer(layer=layer, query=query, z=z, batch_size=batch_size)

    def getLayer(self, layer='LSTM', query=None, z=0, batch_size=None):
        """Dispatch on the requested recurrence flavour."""
        if layer == 'LSTM':
            self.if_SA = False
            self.if_Conv = False
            return self.LSTM_layer()
        if layer == 'ConvLSTM':
            self.if_SA = False
            self.if_Conv = True
            return self.convLSTM()
        if layer == 'SA_LSTM':
            # BUGFIX: this class defines no SA_LSTM method, so the original
            # `self.SA_LSTM(query, z=z)` raised AttributeError at runtime;
            # fail with the intended "not implemented" signal instead.
            raise NotImplementedError("算法尚未实现")
        if layer == 'SA_ConvLSTM':
            self.if_SA = True
            self.if_Conv = True
            return self.SA_ConvLSTM(query, z=z, batch_size=batch_size)
        if layer == 'SA_ConvLSTM1':
            self.if_SA = True
            self.if_Conv = True
            return self.SA_ConvLSTM1()
        raise ValueError("算法尚未实现")

    def LSTM_layer(self):
        """Plain per-step LSTM recurrence over axis 1 of self.inputs."""
        sequence = self.inputs
        _, steps, dims = sequence.shape
        print(steps, dims)
        for step in range(steps):
            cell_in = tf.expand_dims(sequence[:, step, :], axis=1)
            if step != 0:
                cell_in = tf.concat([cell_in, prev_h], axis=-1)

            it = tf.nn.sigmoid(self.Wi[step](cell_in))
            ft = tf.nn.sigmoid(self.Wf[step](cell_in))
            ct_cand = tf.nn.tanh(self.Wc[step](cell_in))
            ot = tf.nn.sigmoid(self.Wo[step](cell_in))

            if step != 0:
                ct = tf.add(tf.multiply(ft, prev_c), tf.multiply(it, ct_cand))
            else:
                # NOTE(review): with a zero initial cell state the textbook
                # update is it*ct_cand; adding ft deviates from the standard
                # LSTM -- confirm this is intentional.
                ct = tf.add(ft, tf.multiply(it, ct_cand))
            ht = tf.multiply(tf.nn.tanh(ct), ot)

            outputs = ht if step == 0 else tf.concat([outputs, ht], axis=1)
            prev_h = ht
            prev_c = ct

        print(outputs.shape)
        return outputs

    def convLSTM(self):
        # NOTE(review): not implemented; selecting 'ConvLSTM' returns None.
        return None

    # self-attention driven by a true query signal
    def SA_ConvLSTM(self, query, z=0, batch_size=None):
        """Per-step recurrence with additive attention.

        `z` indexes the current batch inside `query`: rows
        z*batch_size:(z+1)*batch_size are used.
        """
        # TODO: after save/load the query array deserialises as a plain
        # list; force it back to a float32 tensor.
        query = tf.cast(query, dtype=tf.float32)

        if batch_size is None:  # BUGFIX: `== None` -> `is None`
            batch_size = self.batch_size

        sequence = self.inputs
        _, steps, dims = sequence.shape

        for step in range(steps):
            cell_in = tf.expand_dims(sequence[:, step, :], axis=1)
            if step != 0:
                cell_in = tf.concat([cell_in, prev_h], axis=-1)

            # additive self-attention block
            query_cell = tf.expand_dims(query[z * batch_size:(z + 1) * batch_size, step, :], axis=-1)
            attn_in = tf.concat([cell_in, query_cell], axis=-1)
            scores = tf.nn.tanh(self.temp1[step](attn_in)[:, :, :-1])
            weights = tf.nn.softmax(self.Si[step](scores))
            # residual-style reweighting: (1 + attention) * input
            cell_in = tf.multiply(weights + tf.ones(shape=weights.shape), cell_in)

            it = tf.nn.sigmoid(self.Wi[step](cell_in))
            ft = tf.nn.sigmoid(self.Wf[step](cell_in))
            ct_cand = tf.nn.tanh(self.Wc[step](cell_in))
            ot = tf.nn.sigmoid(self.Wo[step](cell_in))

            if step != 0:
                ct = tf.add(tf.multiply(ft, prev_c), tf.multiply(it, ct_cand))
            else:
                ct = tf.add(ft, tf.multiply(it, ct_cand))
            ht = tf.multiply(tf.nn.tanh(ct), ot)

            outputs = ht if step == 0 else tf.concat([outputs, ht], axis=1)
            prev_h = ht
            prev_c = ct

        return outputs

    # self-attention with kqv -- not implemented yet
    def SA_ConvLSTM1(self):
        pass
class PredictModel(Model):
    """Three stacked SA-ConvLSTM blocks (128/256/512 units) + MLP head."""

    def __init__(self, batch_size, query_label=None):
        super(PredictModel, self).__init__()
        self.batch_size = batch_size
        self.query = query_label
        self.train_loss = None
        self.val_loss = []
        self.LSTM_object1 = LSTM_realize(units=128, batch_size=self.batch_size, if_SA=True, if_Conv=True,
                                         query=self.query)
        self.LSTM_object2 = LSTM_realize(units=256, batch_size=self.batch_size, if_SA=True, if_Conv=True,
                                         query=self.query)
        self.LSTM_object3 = LSTM_realize(units=512, batch_size=self.batch_size, if_SA=True, if_Conv=True,
                                         query=self.query)
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.d1 = tf.keras.layers.Dense(10)
        self.drop2 = tf.keras.layers.Dropout(0.2)
        self.d2 = tf.keras.layers.Dense(1, name='output')

    def _backbone(self, x, z, query, batch_size):
        """Run the three SA-ConvLSTM + BatchNorm stages.

        BUGFIX: the original forward passes referenced self.LSTM_object and
        self.bn, neither of which __init__ creates; the intended chain
        (per call()/get_loss()) uses LSTM_object1/2/3 with bn1/2/3.
        """
        h = self.LSTM_object1(inputs=x, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size)
        h = self.bn1(h)
        h = self.LSTM_object2(inputs=h, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size)
        h = self.bn2(h)
        h = self.LSTM_object3(inputs=h, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size)
        return self.bn3(h)

    def call(self, inputs, training=None, mask=None, z=0, label=None, query=None, batch_size=None):
        """Forward pass: backbone -> Dense(10) -> Dense(1)."""
        if query is None:  # BUGFIX: `== None` compares element-wise on arrays
            query = self.query
        if batch_size is None:
            batch_size = self.batch_size
        features = self._backbone(tf.cast(inputs, tf.float32), z, query, batch_size)
        return self.d2(self.d1(features))

    def get_loss(self, inputs_tensor, label, query=None, batch_size=None, z=0):
        """MSE between the forward pass and `label`."""
        if query is None:
            query = self.query
        if batch_size is None:
            batch_size = self.batch_size
        prediction = self.call(inputs_tensor, z=z, query=query, batch_size=batch_size)
        return tf.reduce_mean(tf.keras.losses.mse(y_true=label, y_pred=prediction))

    def get_selfLoss(self, inputs_tensor, label, query=None, batch_size=None, z=0):
        """MSE plus first/second-difference smoothness penalties (0.01 each)."""
        if query is None:
            query = self.query
        if batch_size is None:
            batch_size = self.batch_size
        features = self._backbone(tf.cast(inputs_tensor, tf.float32), z, query, batch_size)
        _, steps, _ = features.shape
        first_diff = 0
        second_diff = 0
        for i in range(steps - 1):
            first_diff += tf.abs(features[:, i + 1, :] - features[:, i, :])
        for i in range(steps - 3):
            second_diff += tf.abs(features[:, i + 3, :] - features[:, i + 2, :]
                                  - (features[:, i + 1, :] - features[:, i, :]))
        aloss = tf.reduce_mean(first_diff)
        bloss = tf.reduce_mean(second_diff)
        output = self.d2(self.d1(features))
        loss = tf.reduce_mean(tf.keras.losses.mse(y_true=label, y_pred=output))
        return loss + 0.01 * aloss + 0.01 * bloss

    def get_grad(self, input_tensor, label, query=None, z=0):
        """Gradients of get_loss w.r.t. all model variables."""
        with tf.GradientTape() as tape:
            tape.watch(self.variables)  # also watch non-trainable variables
            loss = self.get_loss(input_tensor, label=label, query=query, z=z)
            self.train_loss = loss  # cached so train() can report it
        return tape.gradient(loss, self.variables)

    def train(self, input_tensor, label, query=None, learning_rate=1e-3, z=0):
        """One optimisation step; returns the step's training loss.

        NOTE(review): a new Adam is built each call, so optimiser state
        never accumulates -- consider keeping one optimiser instance.
        """
        grads = self.get_grad(input_tensor, label=label, query=query, z=z)
        optimizers.Adam(learning_rate).apply_gradients(zip(grads, self.variables))
        return self.train_loss

    def get_val_loss(self, val_data, val_label, val_query, batch_size=1, z=0):
        """Mean validation MSE, one sample at a time (batch_size==1 only)."""
        self.val_loss = []
        n_samples = val_data.shape[0]
        if batch_size is None:
            batch_size = self.batch_size
        for idx in range(n_samples):
            sample = tf.expand_dims(val_data[idx, :, :], axis=0)
            sample_query = tf.expand_dims(val_query[idx, :, :], axis=0)
            sample_label = tf.expand_dims(val_label[idx, :], axis=0)
            prediction = self.call(sample, z=z, query=sample_query, batch_size=batch_size)
            self.val_loss.append(
                tf.reduce_mean(tf.keras.losses.mse(y_true=sample_label, y_pred=prediction)))
        return tf.reduce_mean(self.val_loss)
class PredictModel_MutiHead(Model):
    """Variant of PredictModel intended for the multi-head attention path.

    NOTE(review): the forward pass currently mirrors PredictModel's
    'SA_ConvLSTM' chain; switching to 'SA_ConvLSTM1' (multi-head kqv) would
    additionally require constructing LSTM_realize with if_mutiHead=True --
    confirm intent before changing it.
    """

    def __init__(self, batch_size, query_label):
        # BUGFIX: the original called super(PredictModel, self).__init__(),
        # naming the wrong class; name this class itself.
        super(PredictModel_MutiHead, self).__init__()
        self.batch_size = batch_size
        self.query = query_label
        self.train_loss = None
        self.val_loss = []
        self.LSTM_object1 = LSTM_realize(units=128, batch_size=self.batch_size, if_SA=True, if_Conv=True,
                                         query=self.query)
        self.LSTM_object2 = LSTM_realize(units=256, batch_size=self.batch_size, if_SA=True, if_Conv=True,
                                         query=self.query)
        self.LSTM_object3 = LSTM_realize(units=512, batch_size=self.batch_size, if_SA=True, if_Conv=True,
                                         query=self.query)
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.d1 = tf.keras.layers.Dense(10)
        self.drop2 = tf.keras.layers.Dropout(0.2)
        self.d2 = tf.keras.layers.Dense(1, name='output')

    def _features(self, x, z, query, batch_size):
        """Three SA-ConvLSTM + BatchNorm stages.

        BUGFIX: the original referenced the non-existent self.LSTM_object /
        self.bn; the chain uses LSTM_object1/2/3 with bn1/2/3.
        """
        h = self.LSTM_object1(inputs=x, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size)
        h = self.bn1(h)
        h = self.LSTM_object2(inputs=h, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size)
        h = self.bn2(h)
        h = self.LSTM_object3(inputs=h, layer='SA_ConvLSTM', z=z, query=query, batch_size=batch_size)
        return self.bn3(h)

    def call(self, inputs, training=None, mask=None, z=0, label=None, query=None, batch_size=None):
        """Forward pass: feature stack -> Dense(10) -> Dense(1)."""
        if query is None:  # BUGFIX: `== None` compares element-wise on arrays
            query = self.query
        if batch_size is None:
            batch_size = self.batch_size
        feats = self._features(tf.cast(inputs, tf.float32), z, query, batch_size)
        return self.d2(self.d1(feats))

    def get_loss(self, inputs_tensor, label, query=None, batch_size=None, z=0):
        """MSE between the forward pass and `label`."""
        if query is None:
            query = self.query
        if batch_size is None:
            batch_size = self.batch_size
        prediction = self.call(inputs_tensor, z=z, query=query, batch_size=batch_size)
        return tf.reduce_mean(tf.keras.losses.mse(y_true=label, y_pred=prediction))

    def get_selfLoss(self, inputs_tensor, label, query=None, batch_size=None, z=0):
        """MSE plus first/second-difference smoothness penalties (0.01 each)."""
        if query is None:
            query = self.query
        if batch_size is None:
            batch_size = self.batch_size
        feats = self._features(tf.cast(inputs_tensor, tf.float32), z, query, batch_size)
        _, steps, _ = feats.shape
        first_diff = 0
        second_diff = 0
        for i in range(steps - 1):
            first_diff += tf.abs(feats[:, i + 1, :] - feats[:, i, :])
        for i in range(steps - 3):
            second_diff += tf.abs(feats[:, i + 3, :] - feats[:, i + 2, :]
                                  - (feats[:, i + 1, :] - feats[:, i, :]))
        output = self.d2(self.d1(feats))
        loss = tf.reduce_mean(tf.keras.losses.mse(y_true=label, y_pred=output))
        return loss + 0.01 * tf.reduce_mean(first_diff) + 0.01 * tf.reduce_mean(second_diff)

    def get_grad(self, input_tensor, label, query=None, z=0):
        """Gradients of get_loss w.r.t. all model variables."""
        with tf.GradientTape() as tape:
            tape.watch(self.variables)  # also watch non-trainable variables
            loss = self.get_loss(input_tensor, label=label, query=query, z=z)
            self.train_loss = loss
        return tape.gradient(loss, self.variables)

    def train(self, input_tensor, label, query=None, learning_rate=1e-3, z=0):
        """One optimisation step; returns the training loss.

        NOTE(review): a new Adam is built each call, so optimiser state
        never accumulates -- consider keeping one optimiser instance.
        """
        grads = self.get_grad(input_tensor, label=label, query=query, z=z)
        optimizers.Adam(learning_rate).apply_gradients(zip(grads, self.variables))
        return self.train_loss

    def get_val_loss(self, val_data, val_label, val_query, batch_size=1, z=0):
        """Mean validation MSE, one sample at a time (batch_size==1 only)."""
        self.val_loss = []
        n_samples = val_data.shape[0]
        if batch_size is None:
            batch_size = self.batch_size
        for idx in range(n_samples):
            sample = tf.expand_dims(val_data[idx, :, :], axis=0)
            sample_query = tf.expand_dims(val_query[idx, :, :], axis=0)
            sample_label = tf.expand_dims(val_label[idx, :], axis=0)
            prediction = self.call(sample, z=z, query=sample_query, batch_size=batch_size)
            self.val_loss.append(
                tf.reduce_mean(tf.keras.losses.mse(y_true=sample_label, y_pred=prediction)))
        return tf.reduce_mean(self.val_loss)
class LSTM_realize(layers.Layer):
    """Weight-SHARED LSTM cell: one gate quadruple is reused at every timestep.

    Dispatch (via `layer=` in call):
      'LSTM' / 'ConvLSTM'  plain shared-weight recurrence (dense / Conv1D gates)
      'SA_LSTM'            attention variant; delegates to SA_ConvLSTM1
      'SA_ConvLSTM'        query-driven additive self-attention + conv gates
      'SA_ConvLSTM1'       multi-head kqv self-attention + shared gates
    """

    # weight initialisers shared by every sub-layer
    k_ini = initializers.GlorotUniform()
    b_ini = initializers.Zeros()

    def __init__(self, units=30, batch_size=10, if_SA=False, if_Conv=False, if_mutiHead=False, num_heads=8,
                 qkv_bias=False, qk_scale=None, attn_drop_ratio=0., proj_drop_ratio=0., query=None, **kwargs):
        super(LSTM_realize, self).__init__()
        self.units = units
        self.batch_size = batch_size
        self.if_SA = if_SA
        self.if_Conv = if_Conv
        self.query = query
        self.if_mutiHead = if_mutiHead
        self.num_heads = num_heads
        self.qkv_bias = qkv_bias
        self.qkv_scale = qk_scale
        self.attn_drop_ratio = attn_drop_ratio
        self.proj_drop_ratio = proj_drop_ratio

    def get_config(self):
        """Serialise custom constructor arguments for model saving."""
        config = {
            'units': self.units,
            'batch_size': self.batch_size,
            'if_SA': self.if_SA,
            'if_Conv': self.if_Conv,
            'proj_drop_ratio': self.proj_drop_ratio,
            'attn_drop_ratio': self.attn_drop_ratio,
            'qkv_bias': self.qkv_bias,
            'if_mutiHead': self.if_mutiHead,
            'num_heads': self.num_heads,
            'qkv_scale': self.qkv_scale,
            'query': self.query,
        }
        base_config = super(LSTM_realize, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        # ONE shared gate quadruple, kept inside 1-element lists so the
        # access pattern (self.Wi[0], ...) mirrors the non-shared sibling.
        self.Wi, self.Wf, self.Wc, self.Wo = [], [], [], []
        self.Si, self.temp1 = [], []

        if not self.if_Conv:
            Wi = Dense(self.units, kernel_initializer=self.k_ini)
            Wf = Dense(self.units, kernel_initializer=self.k_ini)
            Wc = Dense(self.units, kernel_initializer=self.k_ini)
            Wo = Dense(self.units, kernel_initializer=self.k_ini)
        else:
            Wi = Conv1D(self.units, kernel_size=3, padding='SAME', kernel_initializer=self.k_ini)
            Wf = Conv1D(self.units, kernel_size=3, padding='SAME', kernel_initializer=self.k_ini)
            Wc = Conv1D(self.units, kernel_size=3, padding='SAME', kernel_initializer=self.k_ini)
            Wo = Conv1D(self.units, kernel_size=3, padding='SAME', kernel_initializer=self.k_ini)

        if self.if_SA:
            # attention layers sized for the [hidden | input] concatenation
            self.Si.append(Dense(self.units + input_shape[-1], use_bias=False))
            self.temp1.append(Dense(self.units + input_shape[-1] + 1, use_bias=False))

        if self.if_mutiHead:
            head_dim = input_shape[-1] // self.num_heads
            # caller-provided qk_scale wins; otherwise 1/sqrt(head_dim)
            self.scale = self.qkv_scale or head_dim ** -0.5
            self.qkv = Dense(3 * (self.units + input_shape[-1]), use_bias=self.qkv_bias, name="qkv",
                             kernel_initializer=self.k_ini, bias_initializer=self.b_ini)
            self.attn_drop = layers.Dropout(self.attn_drop_ratio)
            # output (Wo) projection: the attention block keeps its width
            self.proj = layers.Dense(input_shape[-1] + self.units, name="out",
                                     kernel_initializer=self.k_ini, bias_initializer=self.b_ini)
            self.proj_drop = layers.Dropout(self.proj_drop_ratio)
            self.reshape1 = tf.keras.layers.Reshape(
                target_shape=(1, 3, self.num_heads, (input_shape[-1] + self.units) // self.num_heads))
            self.reshape2 = tf.keras.layers.Reshape(
                target_shape=(1, (input_shape[-1] + self.units)))

        self.Wi.append(Wi)
        self.Wf.append(Wf)
        self.Wc.append(Wc)
        self.Wo.append(Wo)

    def call(self, inputs, layer='LSTM', z=0, query=None, batch_size=None, **kwargs):
        # query/batch_size defaulting is handled by the caller
        self.inputs = inputs
        return self.getLayer(layer=layer, query=query, z=z, batch_size=batch_size)

    def getLayer(self, layer='LSTM', query=None, z=0, batch_size=None):
        """Dispatch on the requested recurrence flavour."""
        if layer == 'LSTM':
            self.if_SA = False
            self.if_Conv = False
            return self.LSTM_layer()
        if layer == 'ConvLSTM':
            self.if_SA = False
            self.if_Conv = True
            return self.convLSTM()
        if layer == 'SA_LSTM':
            self.if_SA = True
            self.if_Conv = False
            return self.SA_LSTM(query, z=z)
        if layer == 'SA_ConvLSTM':
            self.if_SA = True
            self.if_Conv = True
            return self.SA_ConvLSTM(query, z=z, batch_size=batch_size)
        if layer == 'SA_ConvLSTM1':
            self.if_mutiHead = True
            return self.SA_ConvLSTM1()
        raise ValueError("算法尚未实现")

    def _shared_scan(self):
        """Shared-weight LSTM recurrence over axis 1 of self.inputs.

        Step 0 is zero-padded in place of the (absent) previous hidden state
        so the shared gate layers always see width units+dims.  Extracted
        because LSTM_layer() and convLSTM() had byte-identical bodies.
        """
        sequence = self.inputs
        _, steps, dims = sequence.shape
        for step in range(steps):
            cell_in = tf.expand_dims(sequence[:, step, :], axis=1)
            if step != 0:
                cell_in = tf.concat([cell_in, prev_h], axis=-1)
            else:
                cell_in = tf.pad(cell_in, [[0, 0], [0, 0], [0, self.units]])

            it = tf.nn.sigmoid(self.Wi[0](cell_in))
            ft = tf.nn.sigmoid(self.Wf[0](cell_in))
            ct_cand = tf.nn.tanh(self.Wc[0](cell_in))
            ot = tf.nn.sigmoid(self.Wo[0](cell_in))

            if step != 0:
                ct = tf.add(tf.multiply(ft, prev_c), tf.multiply(it, ct_cand))
            else:
                # NOTE(review): with a zero initial cell state the textbook
                # update is it*ct_cand; adding ft deviates -- confirm intent.
                ct = tf.add(ft, tf.multiply(it, ct_cand))
            ht = tf.multiply(tf.nn.tanh(ct), ot)

            outputs = ht if step == 0 else tf.concat([outputs, ht], axis=1)
            prev_h = ht
            prev_c = ct
        return outputs

    def LSTM_layer(self):
        """Plain dense-gate recurrence (gate type was fixed in build())."""
        return self._shared_scan()

    def convLSTM(self):
        """Conv1D-gate recurrence (same scan; gate type fixed in build())."""
        return self._shared_scan()

    def SA_LSTM(self, query=None, z=0):
        """Attention LSTM variant.

        BUGFIX: originally declared as SA_LSTM(self) while getLayer invoked
        self.SA_LSTM(query, z=z), raising TypeError; the (currently unused)
        arguments are now accepted.  Delegates to SA_ConvLSTM1 as before.
        """
        return self.SA_ConvLSTM1()

    # self-attention driven by a true query signal
    def SA_ConvLSTM(self, query, z=0, batch_size=None):
        """Shared-weight recurrence with additive attention.

        `z` indexes the current batch inside `query`: rows
        z*batch_size:(z+1)*batch_size are used.
        """
        # TODO: after save/load `query` deserialises as a plain list; force
        # it back to a float32 tensor.
        query = tf.cast(query, dtype=tf.float32)
        if batch_size is None:  # BUGFIX: `== None` -> `is None`
            batch_size = self.batch_size

        sequence = self.inputs
        _, steps, dims = sequence.shape
        for step in range(steps):
            cell_in = tf.expand_dims(sequence[:, step, :], axis=1)
            if step != 0:
                cell_in = tf.concat([cell_in, prev_h], axis=-1)
            else:
                # BUGFIX: pad step 0 like the other shared-weight variants;
                # without the pad the shared attention/gate layers are built
                # for the narrow step-0 width and fail on step 1.
                cell_in = tf.pad(cell_in, [[0, 0], [0, 0], [0, self.units]])

            query_cell = tf.expand_dims(query[z * batch_size:(z + 1) * batch_size, step, :], axis=-1)
            attn_in = tf.concat([cell_in, query_cell], axis=-1)
            scores = tf.nn.tanh(self.temp1[0](attn_in)[:, :, :-1])
            weights = tf.nn.softmax(self.Si[0](scores))
            # residual-style reweighting: (1 + attention) * input
            cell_in = tf.multiply(weights + tf.ones(shape=weights.shape), cell_in)

            it = tf.nn.sigmoid(self.Wi[0](cell_in))
            ft = tf.nn.sigmoid(self.Wf[0](cell_in))
            ct_cand = tf.nn.tanh(self.Wc[0](cell_in))
            ot = tf.nn.sigmoid(self.Wo[0](cell_in))

            if step != 0:
                ct = tf.add(tf.multiply(ft, prev_c), tf.multiply(it, ct_cand))
            else:
                ct = tf.add(ft, tf.multiply(it, ct_cand))
            ht = tf.multiply(tf.nn.tanh(ct), ot)

            outputs = ht if step == 0 else tf.concat([outputs, ht], axis=1)
            prev_h = ht
            prev_c = ct
        return outputs

    # self-attention with kqv
    def SA_ConvLSTM1(self):
        """Shared-weight recurrence with multi-head kqv self-attention."""
        sequence = self.inputs
        _, steps, dims = sequence.shape
        for step in range(steps):
            cell_in = tf.expand_dims(sequence[:, step, :], axis=1)
            if step == 0:
                cell_in = tf.pad(cell_in, [[0, 0], [0, 0], [0, self.units]])
            else:
                cell_in = tf.concat([cell_in, prev_h], axis=-1)

            # self-attention block
            cell_in = self.muti_head_attention(cell_in, z=step)

            it = tf.nn.sigmoid(self.Wi[0](cell_in))
            ft = tf.nn.sigmoid(self.Wf[0](cell_in))
            ct_cand = tf.nn.tanh(self.Wc[0](cell_in))
            ot = tf.nn.sigmoid(self.Wo[0](cell_in))

            if step != 0:
                ct = tf.add(tf.multiply(ft, prev_c), tf.multiply(it, ct_cand))
            else:
                ct = tf.add(ft, tf.multiply(it, ct_cand))
            ht = tf.multiply(tf.nn.tanh(ct), ot)

            outputs = ht if step == 0 else tf.concat([outputs, ht], axis=1)
            prev_h = ht
            prev_c = ct
        return outputs

    def muti_head_attention(self, inputs, z=0, training=None):
        """Standard multi-head self-attention over a length-1 token sequence."""
        # qkv: one projection split into q, k, v per head
        qkv = self.reshape1(self.qkv(inputs))
        # -> [3, batch, heads, tokens, head_dim]
        qkv = tf.transpose(qkv, [2, 0, 3, 1, 4])
        q, k, v = qkv[0], qkv[1], qkv[2]

        # scaled dot-product attention
        attn = tf.matmul(a=q, b=k, transpose_b=True) * self.scale
        attn = tf.nn.softmax(attn, axis=-1)
        attn = self.attn_drop(attn, training=training)

        # weighted values, heads re-merged, then the output (Wo) projection
        x = tf.matmul(attn, v)
        x = tf.transpose(x, [0, 2, 1, 3])
        x = self.reshape2(x)
        x = self.proj(x)
        return self.proj_drop(x, training=training)
class FTMSE(tf.keras.losses.Loss):
    """Frequency-time MSE.

    Sum of three terms: time-domain MSE, MSE of the (length-normalised) FFT
    amplitude spectra, and MSE of the FFT phase spectra.

    NOTE(review): tf.signal.fft transforms the LAST axis; with inputs shaped
    (batch, length, 1) that axis has size 1 -- confirm the intended FFT axis.
    """

    def call(self, y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)
        # the FFT requires complex input
        true_spec = tf.signal.fft(tf.cast(y_true, tf.complex64))
        pred_spec = tf.signal.fft(tf.cast(y_pred, tf.complex64))
        _, n_points, _ = pred_spec.shape
        mse = tf.keras.losses.mean_squared_error
        time_loss = mse(y_true, y_pred)
        # amplitude spectra are normalised by the sequence length
        amp_loss = mse(tf.abs(true_spec / n_points), tf.abs(pred_spec / n_points))
        angle_loss = mse(tf.math.angle(true_spec), tf.math.angle(pred_spec))
        return time_loss + amp_loss + angle_loss
def Giou_np(bbox_p, bbox_g):
    """Generalised IoU (GIoU) between matched pairs of axis-aligned boxes.

    For details see https://arxiv.org/pdf/1902.09630.pdf.

    :param bbox_p: predicted boxes, shape (N, 4) as (x1, y1, x2, y2)
    :param bbox_g: ground-truth boxes, shape (N, 4) as (x1, y1, x2, y2)
    :return: tuple (giou, loss_iou, loss_giou), each of shape (N,)
    """

    def _canonicalize(b):
        # reorder corners so that x1 <= x2 and y1 <= y2
        x1 = np.minimum(b[:, 0], b[:, 2]).reshape(-1, 1)
        x2 = np.maximum(b[:, 0], b[:, 2]).reshape(-1, 1)
        y1 = np.minimum(b[:, 1], b[:, 3]).reshape(-1, 1)
        y2 = np.maximum(b[:, 1], b[:, 3]).reshape(-1, 1)
        return np.concatenate([x1, y1, x2, y2], axis=1)

    # BUGFIX/generalisation: the original canonicalised only the predicted
    # boxes; ground-truth boxes are now treated identically (a no-op for
    # already-ordered input).
    bbox_p = _canonicalize(bbox_p)
    bbox_g = _canonicalize(bbox_g)

    area_p = (bbox_p[:, 2] - bbox_p[:, 0]) * (bbox_p[:, 3] - bbox_p[:, 1])
    area_g = (bbox_g[:, 2] - bbox_g[:, 0]) * (bbox_g[:, 3] - bbox_g[:, 1])

    # intersection rectangle, clamped at zero for non-overlapping pairs
    x1I = np.maximum(bbox_p[:, 0], bbox_g[:, 0])
    y1I = np.maximum(bbox_p[:, 1], bbox_g[:, 1])
    x2I = np.minimum(bbox_p[:, 2], bbox_g[:, 2])
    y2I = np.minimum(bbox_p[:, 3], bbox_g[:, 3])
    I = np.maximum(y2I - y1I, 0) * np.maximum(x2I - x1I, 0)

    # smallest enclosing box C
    x1C = np.minimum(bbox_p[:, 0], bbox_g[:, 0])
    y1C = np.minimum(bbox_p[:, 1], bbox_g[:, 1])
    x2C = np.maximum(bbox_p[:, 2], bbox_g[:, 2])
    y2C = np.maximum(bbox_p[:, 3], bbox_g[:, 3])
    area_c = (x2C - x1C) * (y2C - y1C)

    U = area_p + area_g - I
    # NOTE(review): degenerate zero-area box pairs give U == 0 and divide by
    # zero here -- confirm inputs are always non-degenerate.
    iou = 1.0 * I / U
    giou = iou - (area_c - U) / area_c

    loss_iou = 1.0 - iou
    loss_giou = 1.0 - giou
    return giou, loss_iou, loss_giou
2], bbox_g[:, 2]) + y2C = np.maximum(bbox_p[:, 3], bbox_g[:, 3]) + + # calc area of Bc + area_c = (x2C - x1C) * (y2C - y1C) + U = area_p + area_g - I + iou = 1.0 * I / U + + # Giou + giou = iou - (area_c - U) / area_c + + # loss_iou = 1 - iou loss_giou = 1 - giou + loss_iou = 1.0 - iou + loss_giou = 1.0 - giou + return giou, loss_iou, loss_giou + +# def giou_tf + + + + +if __name__ == '__main__': + + p = np.array([[21,45,103,172], + [34,283,155,406], + [202,174,271,255]]) + g = np.array([[59,106,154,230], + [71,272,191,419], + [257,244,329,351]]) + Giou_np(p, g) \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/LossFunction/IoU_Loss.py b/TensorFlow_eaxmple/Model_train_test/model/LossFunction/IoU_Loss.py new file mode 100644 index 0000000..35fbb33 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/LossFunction/IoU_Loss.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + +# coding: utf-8 + +''' +@Author : dingjiawen +@Date : 2022/7/18 21:33 +@Usage : IoU loss Function +@Desc : +''' +import tensorflow.keras.backend as K + +# 实际上就是计算边框没有重合的部分占重合部分的比重 +def iou_loss(y_true, y_pred): + # iou loss for bounding box prediction + # input must be as [x1, y1, x2, y2] + + # AOG = Area of Groundtruth box + AoG = K.abs(K.transpose(y_true)[2] - K.transpose(y_true)[0] + 1) * K.abs( + K.transpose(y_true)[3] - K.transpose(y_true)[1] + 1) + + # AOP = Area of Predicted box + AoP = K.abs(K.transpose(y_pred)[2] - K.transpose(y_pred)[0] + 1) * K.abs( + K.transpose(y_pred)[3] - K.transpose(y_pred)[1] + 1) + + # overlaps are the co-ordinates of intersection box + overlap_0 = K.maximum(K.transpose(y_true)[0], K.transpose(y_pred)[0]) + overlap_1 = K.maximum(K.transpose(y_true)[1], K.transpose(y_pred)[1]) + overlap_2 = K.minimum(K.transpose(y_true)[2], K.transpose(y_pred)[2]) + overlap_3 = K.minimum(K.transpose(y_true)[3], K.transpose(y_pred)[3]) + + # intersection area + intersection = (overlap_2 - overlap_0 + 1) * (overlap_3 - overlap_1 + 1) + + # area 
of union of both boxes + union = AoG + AoP - intersection + + # iou calculation + iou = intersection / union + + # bounding values of iou to (0,1) + iou = K.clip(iou, 0.0 + K.epsilon(), 1.0 - K.epsilon()) + + # loss for the iou value + iou_loss = -K.log(iou) + + return iou_loss \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/LossFunction/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/LossFunction/__init__.py new file mode 100644 index 0000000..2d80c0a --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/LossFunction/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- + +# coding: utf-8 + +''' +@Author : dingjiawen +@Date : 2022/7/18 21:33 +@Usage : +@Desc : +''' \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/LossFunction/smooth_L1_Loss.py b/TensorFlow_eaxmple/Model_train_test/model/LossFunction/smooth_L1_Loss.py new file mode 100644 index 0000000..bd43b5e --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/LossFunction/smooth_L1_Loss.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# coding: utf-8 + +''' +@Author : dingjiawen +@Date : 2022/7/20 14:13 +@Usage : +@Desc : +''' +import tensorflow as tf +import tensorflow.keras.backend as K + + +class SmoothL1Loss(tf.keras.losses.Loss): + def call(self, y_true, y_pred): + y_true = tf.cast(y_true, tf.float32) + y_pred = tf.cast(y_pred, tf.float32) + dif = tf.reduce_mean(tf.abs(y_true - y_pred)) + if dif < 1: + return tf.reduce_mean(0.5 * tf.square(y_pred - y_true)) + else: + return dif - 0.5 + # return tf.reduce_mean(tf.square(y_pred - y_true)) diff --git a/TensorFlow_eaxmple/Model_train_test/model/SAE/SAE_realize.py b/TensorFlow_eaxmple/Model_train_test/model/SAE/SAE_realize.py new file mode 100644 index 0000000..dd7f11c --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/SAE/SAE_realize.py @@ -0,0 +1,159 @@ +import tensorflow as tf +import numpy as np +import pandas as pd +from tensorflow.keras import * +import 
tensorflow.keras.backend as kb +import sys +import matplotlib.pyplot as plt + + +# 输入输出为16 × 1的列表 +# inputList为输入列表 +def SAEFC(inputList): + inputList = np.array(inputList) + # 输入特征个数 + inputFeatureNum = len(inputList[0]) + # 隐藏层参数个数:输入特征3倍 + hiddenNum = 3 * inputFeatureNum + # 稀疏度(密度) + density = 0.1 + + lossList = [] + saeModel = SAEModel(inputList.shape[-1], hiddenNum) + for i in range(1000): + loss = saeModel.network_learn(tf.constant(inputList)) + lossList.append(loss) + print(loss) + + # 绘制损失值图像 + x = np.arange(len(lossList)) + 1 + plt.plot(x, lossList) + plt.show() + + return saeModel + + +# 自定义隐藏层 +class SAELayer(layers.Layer): + def __init__(self, num_outputs): + super(SAELayer, self).__init__() + # 该层最后一个节点,其值固定为1, + # 前期可以按照同样的手段让该节点和其他节点一样进行计算, + # 最后在传递给下一层前,将其设置为1即可(即其值固定为1) + self.num_outputs = num_outputs + + def build(self, input_shape): + self.kernel = self.add_variable("kernel", + shape=[int(input_shape[-1]), + self.num_outputs - 1]) + self.bias = self.add_variable("bias", + shape=[self.num_outputs - 1]) + + def call(self, input): + output = tf.matmul(input, self.kernel) + self.bias + # sigmoid函数 + output = tf.nn.sigmoid(output) + bias_list = tf.ones([input.shape[0], 1]) + output = tf.concat([output, bias_list], 1) + self.result = output + return output + + +# 自定义模型 +class SAEModel(Model): + # 可以传入一些超参数,用以动态构建模型 + # __init_——()方法在创建模型对象时被调用 + # input_shape: 输入层和输出层的节点个数(输入层实际要比这多1,因为有个bias) + # hidden_shape: 隐藏层节点个数,隐藏层节点的最后一个节点值固定为1,也是bias + # 使用方法:直接传入实际的input_shape即可,在call中也直接传入原始Input_tensor即可 + # 一切关于数据适配模型的处理都在模型中实现 + def __init__(self, input_shape, hidden_shape=None): + # print("init") + # 隐藏层节点个数默认为输入层的3倍 + if hidden_shape == None: + hidden_shape = 3 * input_shape + # 调用父类__init__()方法 + super(SAEModel, self).__init__() + + self.train_loss = None + self.layer_2 = SAELayer(hidden_shape) + self.layer_3 = layers.Dense(input_shape, activation=tf.nn.sigmoid) + + def call(self, input_tensor, training=False): + # 将input_tensor最后加一列1 + bias_list = 
tf.ones([len(input_tensor), 1]) + input_tensor = tf.concat([input_tensor, bias_list], 1) + # 输入数据 + # x = self.layer_1(input_tensor) + hidden = self.layer_2(input_tensor) + output = self.layer_3(hidden) + return output + + def get_loss(self, input_tensor): + # print("get_loss") + bias_list = tf.ones([len(input_tensor), 1]) + new_input = tf.concat([input_tensor, bias_list], 1) + hidden = self.layer_2(new_input) + output = self.layer_3(hidden) + + # 计算loss + # 计算MSE + mse = (1 / 2) * tf.reduce_sum(kb.square(input_tensor - output)) + + # 计算权重乘法项 + alpha = 0.1 + W1 = self.layer_2.kernel + W2 = self.layer_3.kernel + weightPunish = (alpha / 2) * (tf.reduce_sum(kb.square(W1)) + tf.reduce_sum(kb.square(W2))) + + # 计算KL散度 + # 惩罚因子 + beita = 0.1 + # 每一层的期望密度 + desired_density = 0.1 + layer2_output = self.layer_2.result + + + + + # 实际密度是所有输入数据的密度的平均值 + actual_density = tf.reduce_mean(tf.math.count_nonzero(layer2_output, axis=1) / layer2_output.shape[1]) + actual_density = tf.cast(actual_density, tf.float32) + if actual_density == tf.constant(1.0, dtype=tf.float32): + actual_density = tf.constant(0.999) + actual_density = actual_density.numpy() + + KL = desired_density * np.log(desired_density / actual_density) + KL += (1 - desired_density) * np.log((1 - desired_density) / (1 - actual_density)) + KL *= beita + ans = tf.constant(mse + weightPunish + KL) + return ans + + def get_grad(self, input_tensor): + with tf.GradientTape() as tape: + # todo 原本tape只会监控由tf.Variable创建的trainable=True属性 + tape.watch(self.variables) + L = self.get_loss(input_tensor) + # 保存一下loss,用于输出 + self.train_loss = L + g = tape.gradient(L, self.variables) + return g + + def network_learn(self, input_tensor): + g = self.get_grad(input_tensor) + optimizers.Adam().apply_gradients(zip(g, self.variables)) + return self.train_loss + + # 如果模型训练好了,需要获得隐藏层的输出,直接获取麻烦,则直接运行一遍 + def getReprestation(self, input_tensor): + bias_list = tf.ones([len(input_tensor), 1]) + new_input = tf.concat([input_tensor, bias_list], 1) + 
hidden = self.layer_2(new_input) + return hidden + + +if __name__ == '__main__': + + saeModel = SAEModel(inputList.shape[-1], hiddenNum) + for i in range(1000): + saeModel.network_learn(tf.constant(inputList)) \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/SAE/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/SAE/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/TensorFlow_eaxmple/Model_train_test/model/VAE/VAE_realize.py b/TensorFlow_eaxmple/Model_train_test/model/VAE/VAE_realize.py new file mode 100644 index 0000000..3fac3b6 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/VAE/VAE_realize.py @@ -0,0 +1,143 @@ +import os +import tensorflow as tf +from tensorflow import keras +from PIL import Image +from matplotlib import pyplot as plt +from tensorflow.keras import Sequential, layers +import numpy as np + +tf.random.set_seed(2322) +np.random.seed(23422) + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +assert tf.__version__.startswith('2.') + +# 把num张图片保存到一张 +def save_images(img, name,num): + new_im = Image.new('L', (28*num, 28*num)) + index = 0 + for i in range(0, 28*num, 28): + for j in range(0, 28*num, 28): + im = img[index] + im = Image.fromarray(im, mode='L') + new_im.paste(im, (i, j)) + index += 1 + + new_im.save(name) + +# 定义超参数 +batchsz = 256 +lr = 1e-4 + +# 数据集加载,自编码器不需要标签因为是无监督学习 +(x_train, _), (x_test, _) = keras.datasets.fashion_mnist.load_data() +x_train, x_test = x_train.astype(np.float32) / 255., x_test.astype(np.float32) / 255. 
+train_db = tf.data.Dataset.from_tensor_slices(x_train) +train_db = train_db.shuffle(batchsz * 5).batch(batchsz) +test_db = tf.data.Dataset.from_tensor_slices(x_test) +test_db = test_db.batch(batchsz) + +# 搭建模型 +z_dim = 10 +class VAE(keras.Model): + def __init__(self,z_dim,units=256): + super(VAE, self).__init__() + self.z_dim = z_dim + self.units = units + # 编码网络 + self.vae_encoder = layers.Dense(self.units) + # 均值网络 + self.vae_mean = layers.Dense(self.z_dim) # get mean prediction + # 方差网络(均值和方差是一一对应的,所以维度相同) + self.vae_variance = layers.Dense(self.z_dim) # get variance prediction + + # 解码网络 + self.vae_decoder = layers.Dense(self.units) + # 输出网络 + self.vae_out = layers.Dense(784) + + # encoder传播的过程 + def encoder(self, x): + h = tf.nn.relu(self.vae_encoder(x)) + #计算均值 + mu = self.vae_mean(h) + #计算方差 + log_var = self.vae_variance(h) + + return mu, log_var + + # decoder传播的过程 + def decoder(self, z): + out = tf.nn.relu(self.vae_decoder(z)) + out = self.vae_out(out) + + return out + + def reparameterize(self, mu, log_var): + eps = tf.random.normal(log_var.shape) + + std = tf.exp(log_var) # 去掉log, 得到方差; + std = std**0.5 # 开根号,得到标准差; + + z = mu + std * eps + return z + + def call(self, inputs): + mu, log_var = self.encoder(inputs) + # reparameterizaion trick:最核心的部分 + z = self.reparameterize(mu, log_var) + # decoder 进行还原 + x_hat = self.decoder(z) + + # Variational auto-encoder除了前向传播不同之外,还有一个额外的约束; + # 这个约束使得你的mu, var更接近正太分布;所以我们把mu, log_var返回; + return x_hat, mu, log_var + +model = VAE(z_dim,units=128) +model.build(input_shape=(128, 784)) +optimizer = keras.optimizers.Adam(lr=lr) + +epochs = 30 +for epoch in range(epochs): + + for step, x in enumerate(train_db): + + x = tf.reshape(x, [-1, 784]) + with tf.GradientTape() as tape: + # shape + x_hat, mu, log_var = model(x) + + # 把每个像素点当成一个二分类的问题; + rec_loss = tf.losses.binary_crossentropy(x, x_hat, from_logits=True) + rec_loss = tf.reduce_mean(rec_loss) + + # compute kl divergence (mu, var) ~ N(0, 1): 我们得到的均值方差和正太分布的; + # 
链接参考: https://stats.stackexchange.com/questions/7440/kl-divergence-between-two-univariate-gaussians + kl_div = -0.5 * (log_var + 1 -mu**2 - tf.exp(log_var)) + kl_div = tf.reduce_mean(kl_div) / batchsz + loss = rec_loss + 1. * kl_div + + grads = tape.gradient(loss, model.trainable_variables) + optimizer.apply_gradients(zip(grads, model.trainable_variables)) + + if step % 100 ==0: + print('\repoch: %3d, step:%4d, kl_div: %5f, rec_loss:%9f' %(epoch, step, float(kl_div), float(rec_loss)),end="") + + num_pic = 9 + # evaluation 1: 从正太分布直接sample; + z = tf.random.normal((batchsz, z_dim)) # 从正太分布中sample这个尺寸的 + logits = model.decoder(z) # 通过这个得到decoder + x_hat = tf.sigmoid(logits) + x_hat = tf.reshape(x_hat, [-1, 28, 28]).numpy() * 255. + logits = x_hat.astype(np.uint8) # 标准的图片格式; + save_images(logits, 'd:\\vae_images\\sampled_epoch%d.png' %epoch,num_pic) # 直接sample出的正太分布; + + # evaluation 2: 正常的传播过程; + x = next(iter(test_db)) + x = tf.reshape(x, [-1, 784]) + x_hat_logits, _, _ = model(x) # 前向传播返回的还有mu, log_var + x_hat = tf.sigmoid(x_hat_logits) + x_hat = tf.reshape(x_hat, [-1, 28, 28]).numpy() * 255. 
+ x_hat = x_hat.astype(np.uint8) # 标准的图片格式; + # print(x_hat.shape) + save_images(x_hat, 'd:\\vae_images\\rec_epoch%d.png' %epoch,num_pic) diff --git a/TensorFlow_eaxmple/Model_train_test/model/VAE/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/VAE/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/TensorFlow_eaxmple/Model_train_test/model/VMD/VMD_realize.py b/TensorFlow_eaxmple/Model_train_test/model/VMD/VMD_realize.py new file mode 100644 index 0000000..8072b7b --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/VMD/VMD_realize.py @@ -0,0 +1,66 @@ +from matplotlib import pyplot as plt +import numpy as np +from scipy.signal import hilbert + + +class VMD: + def __init__(self, K, alpha, tau, tol=1e-7, maxIters=200, eps=1e-9): + """ + :param K: 模态数 + :param alpha: 每个模态初始中心约束强度 + :param tau: 对偶项的梯度下降学习率 + :param tol: 终止阈值 + :param maxIters: 最大迭代次数 + :param eps: eps + """ + self.K = K + self.alpha = alpha + self.tau = tau + self.tol = tol + self.maxIters = maxIters + self.eps = eps + + def __call__(self, f): + T = f.shape[0] + t = np.linspace(1, T, T) / T + omega = t - 1. 
/ T + # 转换为解析信号 + f = hilbert(f) + f_hat = np.fft.fft(f) + u_hat = np.zeros((self.K, T), dtype=np.complex) + omega_K = np.zeros((self.K,)) + lambda_hat = np.zeros((T,), dtype=np.complex) + # 用以判断 + u_hat_pre = np.zeros((self.K, T), dtype=np.complex) + u_D = self.tol + self.eps + + # 迭代 + n = 0 + while n < self.maxIters and u_D > self.tol: + for k in range(self.K): + # u_hat + sum_u_hat = np.sum(u_hat, axis=0) - u_hat[k, :] + res = f_hat - sum_u_hat + u_hat[k, :] = (res + lambda_hat / 2) / (1 + self.alpha * (omega - omega_K[k]) ** 2) + + # omega + u_hat_k_2 = np.abs(u_hat[k, :]) ** 2 + omega_K[k] = np.sum(omega * u_hat_k_2) / np.sum(u_hat_k_2) + + # lambda_hat + sum_u_hat = np.sum(u_hat, axis=0) + res = f_hat - sum_u_hat + lambda_hat -= self.tau * res + + n += 1 + u_D = np.sum(np.abs(u_hat - u_hat_pre) ** 2) + u_hat_pre[::] = u_hat[::] + + # 重构,反傅立叶之后取实部 + u = np.real(np.fft.ifft(u_hat, axis=-1)) + + omega_K = omega_K * T + idx = np.argsort(omega_K) + omega_K = omega_K[idx] + u = u[idx, :] + return u, omega_K \ No newline at end of file diff --git a/TensorFlow_eaxmple/Model_train_test/model/VMD/__init__.py b/TensorFlow_eaxmple/Model_train_test/model/VMD/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/TensorFlow_eaxmple/Model_train_test/model/VMD/test.py b/TensorFlow_eaxmple/Model_train_test/model/VMD/test.py new file mode 100644 index 0000000..fa69868 --- /dev/null +++ b/TensorFlow_eaxmple/Model_train_test/model/VMD/test.py @@ -0,0 +1,45 @@ +from matplotlib import pyplot as plt +import numpy as np +from scipy.signal import hilbert +from model.VMD.VMD_realize import VMD + +T = 1000 +fs = 1. 
/ T +t = np.linspace(0, 1, 1000, endpoint=True) +f_1 = 10 +f_2 = 50 +f_3 = 100 +mode_1 = (2 * t) ** 2 +mode_2 = np.sin(2 * np.pi * f_1 * t) +mode_3 = np.sin(2 * np.pi * f_2 * t) +mode_4 = np.sin(2 * np.pi * f_3 * t) +f = mode_1 + mode_2 + mode_3 + mode_4 + 0.5 * np.random.randn(1000) + +plt.figure(figsize=(6, 3), dpi=150) +plt.plot(f, linewidth=1) + +K = 4 +alpha = 2000 +tau = 1e-6 +vmd = VMD(K, alpha, tau) +u, omega_K = vmd(f) +omega_K +# array([0.85049797, 10.08516203, 50.0835613, 100.13259275])) +plt.figure(figsize=(5, 7), dpi=200) +plt.subplot(4, 1, 1) +plt.plot(mode_1, linewidth=0.5, linestyle='--') +plt.plot(u[0, :], linewidth=0.2, c='r') + +plt.subplot(4, 1, 2) +plt.plot(mode_2, linewidth=0.5, linestyle='--') +plt.plot(u[1, :], linewidth=0.2, c='r') + +plt.subplot(4, 1, 3) +plt.plot(mode_3, linewidth=0.5, linestyle='--') +plt.plot(u[2, :], linewidth=0.2, c='r') + +plt.subplot(4, 1, 4) +plt.plot(mode_4, linewidth=0.5, linestyle='--') +plt.plot(u[3, :], linewidth=0.2, c='r') +plt.show() +# [] \ No newline at end of file