From 72764ff529099c2a3d32ac775938423f94d9b493 Mon Sep 17 00:00:00 2001
From: "dingjiawen@xiaomi.com" <dingjiawen@xiaomi.com>
Date: Thu, 14 Sep 2023 16:24:59 +0800
Subject: [PATCH] Summer vacation update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../src/main/java/com/atguigu/scala/AAA.scala |  10 ++
 .../src/main/java/com/atguigu/scala/BBB.scala |  15 +++
 .../main/java/com/atguigu/scala/test111.scala |   9 ++
 .../main/java/com/markilue/leecode/Test1.java |  14 ++-
 .../com/markilue/leecode/test/Fibonaqi.java   |   2 +-
 .../RUL/otherIdea/lru/__init__.py             |   0
 .../Model_train_test/RUL/otherIdea/lru/lru.py | 117 ++++++++++++++++++
 .../RUL/otherIdea/lru/lru2.0.py               | 113 +++++++++++++++++
 8 files changed, 274 insertions(+), 6 deletions(-)
 create mode 100644 Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/AAA.scala
 create mode 100644 Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/BBB.scala
 create mode 100644 Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/test111.scala
 create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/__init__.py
 create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru.py
 create mode 100644 TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru2.0.py

diff --git a/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/AAA.scala b/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/AAA.scala
new file mode 100644
index 0000000..8411e68
--- /dev/null
+++ b/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/AAA.scala
@@ -0,0 +1,10 @@
+package com.atguigu.scala
+
+class AAA(bbb: BBB) {
+
+  def open(): Unit = {
+    bbb.open()
+    println("aaaOpen")
+  }
+
+}
diff --git a/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/BBB.scala b/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/BBB.scala
new file mode 100644
index 0000000..416d02d
--- /dev/null
+++ b/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/BBB.scala
@@ -0,0 +1,15 @@
+package com.atguigu.scala
+
+class BBB {
+
+  def this(a: Int, b: Int) {
+    this()
+    println(a)
+    println(b)
+  }
+
+  def open(): Unit = {
+    println("open")
+  }
+
+}
diff --git a/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/test111.scala b/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/test111.scala
new file mode 100644
index 0000000..78db09f
--- /dev/null
+++ b/Big_data_example/scala/scala0224/src/main/java/com/atguigu/scala/test111.scala
@@ -0,0 +1,9 @@
+package com.atguigu.scala
+
+object test111 {
+
+  def main(args: Array[String]): Unit = {
+    new AAA(new BBB(1, 2)).open()
+  }
+
+}
diff --git a/Leecode/src/main/java/com/markilue/leecode/Test1.java b/Leecode/src/main/java/com/markilue/leecode/Test1.java
index c49f82d..610bd2e 100644
--- a/Leecode/src/main/java/com/markilue/leecode/Test1.java
+++ b/Leecode/src/main/java/com/markilue/leecode/Test1.java
@@ -2,7 +2,11 @@ package com.markilue.leecode;
 
 import cn.hutool.json.JSONObject;
 import cn.hutool.json.JSONUtil;
-import com.cqu.phmapiclientsdk.client.PhmApiClient;
+
+
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
 
 /**
 *@BelongsProject: Leecode
@@ -15,9 +19,9 @@ import com.cqu.phmapiclientsdk.client.PhmApiClient;
 public class Test1 {
 
     public static void main(String[] args) {
-        PhmApiClient phmApiClient = new PhmApiClient("875f0554b4e041b3ba4be50cb68eb94f", "438fdae618794937a5333cfb6856a601");
-        JSONObject jsonObject = phmApiClient.invokeByURL("http://172.28.9.61:8090/api/gas-turbine/things/search/", "{\"page_index\": 1, \"page_size\": 10, \"thing_group_uuid\":\"c1b3eb711fbd4475b262fe84fc965ea1\"}", "POST");
-        String s = JSONUtil.toJsonStr(jsonObject);
-        System.out.println(s);
+        String dateString = "20230822";
+        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMdd");
+        LocalDate date = LocalDate.parse(dateString, formatter);
+        System.out.println("Parsed LocalDate: " + date);
     }
 }
diff --git a/Leecode/src/main/java/com/markilue/leecode/test/Fibonaqi.java b/Leecode/src/main/java/com/markilue/leecode/test/Fibonaqi.java
index 1de7bb5..32e0640 100644
--- a/Leecode/src/main/java/com/markilue/leecode/test/Fibonaqi.java
+++ b/Leecode/src/main/java/com/markilue/leecode/test/Fibonaqi.java
@@ -16,7 +16,7 @@ public class Fibonaqi {
     public static void main(String[] args) {
         int n = 5;
         System.out.println(fibonacci(1, 1, 10));
-        Arrays.copyOf()
+
     }
 
     public static int fibonacci(int first, int second, int n) {
diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/__init__.py b/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru.py b/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru.py
new file mode 100644
index 0000000..5acb8a7
--- /dev/null
+++ b/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru.py
@@ -0,0 +1,117 @@
+#! -*- coding: utf-8 -*-
+# Linear Recurrent Unit (LRU)
+# Tested with TensorFlow 1.15 + bert4keras 0.11.4
+
+from bert4keras.layers import *
+
+
+class LRU(Layer):
+    """Linear Recurrent Unit
+    Reference 1: https://arxiv.org/abs/2303.06349
+    Reference 2: https://kexue.fm/archives/9554
+    """
+    def __init__(
+        self,
+        units,
+        activation='linear',
+        use_bias=True,
+        unroll=True,  # unroll speeds up training but uses more GPU memory
+        kernel_initializer='glorot_uniform',
+        **kwargs
+    ):
+        super(LRU, self).__init__(**kwargs)
+        self.units = units
+        self.activation = activations.get(activation)
+        self.use_bias = use_bias
+        self.unroll = unroll
+        self.kernel_initializer = initializers.get(kernel_initializer)
+
+    @integerize_shape
+    def build(self, input_shape):
+        super(LRU, self).build(input_shape)
+        hidden_size = input_shape[-1]
+        self.i_dense = Dense(
+            units=self.units * 2,
+            use_bias=self.use_bias,
+            kernel_initializer=self.kernel_initializer
+        )
+        self.o_dense = Dense(
+            units=hidden_size,
+            use_bias=self.use_bias,
+            activation=self.activation,
+            kernel_initializer=self.kernel_initializer
+        )
+
+        def initializer(shape, dtype=None):
+            r_min, r_max = 0.9, 0.999
+            u1 = np.random.random(size=shape[1])
+            u2 = np.random.random(size=shape[1])
+            nu_log = np.log(
+                -0.5 * np.log(u1 * (r_max**2 - r_min**2) + r_min**2)
+            )
+            theta_log = np.log(u2 * np.pi * 2)
+            gamma_log = np.log(np.sqrt(1 - np.exp(-np.exp(nu_log))**2))
+            return np.array([nu_log, theta_log, gamma_log])
+
+        self.params_log = self.add_weight(
+            name='params_log', shape=(3, self.units), initializer=initializer
+        )
+
+    @recompute_grad
+    def call(self, inputs, mask=None):
+        u = self.i_dense(inputs)
+        params = K.exp(self.params_log)
+        nu, theta, gamma = params[0], params[1], params[2]
+
+        if self.unroll:
+            L_in = K.int_shape(u)[1]
+            assert L_in is not None, 'input_length cannot be None while unroll=True'
+            log2_L = int(np.ceil(np.log2(L_in)))
+        else:
+            L_in = K.shape(u)[1]
+            log2_L = K.log(K.cast(L_in, K.floatx())) / K.log(2.)
+            log2_L = K.cast(tf.ceil(log2_L), 'int32')
+
+        u = tf.complex(u[..., ::2], u[..., 1::2])
+        u = tf.pad(u, [[0, 0], [0, 2**log2_L - K.shape(u)[1]], [0, 0]])
+        B, L, D = K.shape(u)[0], K.shape(u)[1], K.int_shape(u)[-1]
+
+        def lru(i, x):
+            l = 2**i
+            x = K.reshape(x, [B * L // l, l, D])
+            x1, x2 = x[:, :l // 2], x[:, l // 2:]
+
+            pos = K.arange(1, l // 2 + 1, dtype=K.floatx())
+            nus = tf.einsum('n,d->nd', pos, nu)
+            thetas = tf.einsum('n,d->nd', pos, theta)
+            lambs = K.exp(tf.complex(-nus, thetas))
+
+            x2 = x2 + lambs * x1[:, -1:]
+            x = K.concatenate([x1, x2], axis=1)
+            if (not self.unroll) and K.int_shape(u)[1] is not None:
+                x = K.reshape(x, [B, L, D])
+
+            return i + 1, x
+
+        if self.unroll:
+            x = u
+            for i in range(log2_L):
+                _, x = lru(i + 1, x)
+        else:
+            _, x = tf.while_loop(lambda i, x: i <= log2_L, lru, [1, u])
+
+        x = x[:, :L_in] * tf.complex(gamma, 0.)
+        x = K.concatenate([tf.real(x), tf.imag(x)], axis=-1)
+        return self.o_dense(x)
+
+    def get_config(self):
+        config = {
+            'units': self.units,
+            'activation': activations.serialize(self.activation),
+            'use_bias': self.use_bias,
+            'unroll': self.unroll,
+            'kernel_initializer':
+                initializers.serialize(self.kernel_initializer),
+        }
+        base_config = super(LRU, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
\ No newline at end of file
diff --git a/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru2.0.py b/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru2.0.py
new file mode 100644
index 0000000..08086c2
--- /dev/null
+++ b/TensorFlow_eaxmple/Model_train_test/RUL/otherIdea/lru/lru2.0.py
@@ -0,0 +1,113 @@
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras.layers import Layer, Dense
+from tensorflow.keras import activations, initializers, backend as K
+
+class LRU(Layer):
+    """Linear Recurrent Unit
+    Reference 1: https://arxiv.org/abs/2303.06349
+    Reference 2: https://kexue.fm/archives/9554
+    """
+    def __init__(
+        self,
+        units,
+        activation='linear',
+        use_bias=True,
+        unroll=True,  # unroll speeds up training but uses more GPU memory
+        kernel_initializer='glorot_uniform',
+        **kwargs
+    ):
+        super(LRU, self).__init__(**kwargs)
+        self.units = units
+        self.activation = activations.get(activation)
+        self.use_bias = use_bias
+        self.unroll = unroll
+        self.kernel_initializer = initializers.get(kernel_initializer)
+
+    def build(self, input_shape):
+        super(LRU, self).build(input_shape)
+        hidden_size = input_shape[-1]
+        self.i_dense = Dense(
+            units=self.units * 2,
+            use_bias=self.use_bias,
+            kernel_initializer=self.kernel_initializer
+        )
+        self.o_dense = Dense(
+            units=hidden_size,
+            use_bias=self.use_bias,
+            activation=self.activation,
+            kernel_initializer=self.kernel_initializer
+        )
+
+        def initializer(shape, dtype=None):
+            r_min, r_max = 0.9, 0.999
+            u1 = np.random.random(size=shape[1])
+            u2 = np.random.random(size=shape[1])
+            nu_log = np.log(
+                -0.5 * np.log(u1 * (r_max**2 - r_min**2) + r_min**2)
+            )
+            theta_log = np.log(u2 * np.pi * 2)
+            gamma_log = np.log(np.sqrt(1 - np.exp(-np.exp(nu_log))**2))
+            return np.array([nu_log, theta_log, gamma_log])
+
+        self.params_log = self.add_weight(
+            name='params_log', shape=(3, self.units), initializer=initializer
+        )
+
+    @tf.function
+    def call(self, inputs, mask=None):
+        u = self.i_dense(inputs)
+        params = tf.exp(self.params_log)
+        nu, theta, gamma = params[0], params[1], params[2]
+
+        if self.unroll:
+            L_in = K.int_shape(u)[1]
+            assert L_in is not None, 'input_length cannot be None while unroll=True'
+            log2_L = int(np.ceil(np.log2(L_in)))
+        else:
+            L_in = tf.shape(u)[1]
+            log2_L = tf.cast(tf.math.ceil(tf.math.log(tf.cast(L_in, dtype=tf.float32)) / tf.math.log(2.)), tf.int32)
+
+        u = tf.complex(u[..., ::2], u[..., 1::2])
+        u = tf.pad(u, [[0, 0], [0, 2**log2_L - tf.shape(u)[1]], [0, 0]])
+        B, L, D = tf.shape(u)[0], tf.shape(u)[1], tf.shape(u)[-1]
+
+        def lru(i, x):
+            l = 2**i
+            x = tf.reshape(x, [B * L // l, l, D])
+            x1, x2 = x[:, :l // 2], x[:, l // 2:]
+
+            pos = tf.range(1, l // 2 + 1, dtype=tf.float32)
+            nus = tf.einsum('n,d->nd', pos, nu)
+            thetas = tf.einsum('n,d->nd', pos, theta)
+            lambs = tf.exp(tf.complex(-nus, thetas))
+
+            x2 = x2 + lambs * x1[:, -1:]
+            x = tf.concat([x1, x2], axis=1)
+            if (not self.unroll) and K.int_shape(u)[1] is not None:
+                x = tf.reshape(x, [B, L, D])
+
+            return i + 1, x
+
+        if self.unroll:
+            x = u
+            for i in range(log2_L):
+                _, x = lru(i + 1, x)
+        else:
+            _, x = tf.while_loop(lambda i, x: i <= log2_L, lru, [1, u])
+
+        x = x[:, :L_in] * tf.complex(gamma, 0.)
+        x = tf.concat([tf.math.real(x), tf.math.imag(x)], axis=-1)
+        return self.o_dense(x)
+
+    def get_config(self):
+        config = {
+            'units': self.units,
+            'activation': activations.serialize(self.activation),
+            'use_bias': self.use_bias,
+            'unroll': self.unroll,
+            'kernel_initializer':
+                initializers.serialize(self.kernel_initializer),
+        }
+        base_config = super(LRU, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
\ No newline at end of file
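
Note on the two LRU files above: both compute the linear recurrence
h_t = lambda * h_{t-1} + u_t (with complex lambda = exp(-nu + i*theta)) as a
parallel scan. The input is zero-padded to a power-of-two length, and each
binary-lifting pass doubles the solved block size by adding lambda^k-weighted
carries from the last element of each left half-block into its right
half-block, so a length-L sequence needs only ceil(log2(L)) passes instead of
L sequential steps.

A minimal smoke test for the plain-Keras version (a sketch, assuming
TensorFlow 2.x; the module name `lru2` is hypothetical, since a file named
`lru2.0.py` cannot be imported under its own name):

    import tensorflow as tf
    from lru2 import LRU  # assumes lru2.0.py was saved/renamed as lru2.py

    # 4 sequences of length 48 with 32 features; unroll=True requires a
    # statically known sequence length.
    x = tf.random.normal([4, 48, 32])
    layer = LRU(units=16, unroll=True)

    # i_dense lifts the input to 2*units channels (paired into complex
    # numbers), the scan runs on the padded length 64, and o_dense projects
    # back to the original 32 features, so input and output shapes match.
    y = layer(x)
    print(y.shape)  # (4, 48, 32)

With unroll=False the same layer builds a tf.while_loop instead of unrolled
passes, trading speed for lower memory use, as the constructor comment notes.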