Summer vacation update

dingjiawen@xiaomi.com 2023-09-14 16:24:59 +08:00
parent 1d7bfec0f8
commit 72764ff529
8 changed files with 274 additions and 6 deletions

View File

@ -0,0 +1,10 @@
package com.atguigu.scala

// Constructor injection: AAA delegates to the BBB instance it is given
class AAA(bbb: BBB) {
  def open(): Unit = {
    bbb.open()
    println("aaaOpen")
  }
}

View File

@ -0,0 +1,15 @@
package com.atguigu.scala

class BBB {
  // Auxiliary constructor: its first statement must call the
  // primary constructor this()
  def this(a: Int, b: Int) {
    this()
    println(a)
    println(b)
  }

  def open(): Unit = {
    println("open")
  }
}

View File

@ -0,0 +1,9 @@
package com.atguigu.scala

object test111 {
  def main(args: Array[String]): Unit = {
    // Prints 1 and 2 from BBB's auxiliary constructor,
    // then "open" and "aaaOpen" from the chained open() calls
    new AAA(new BBB(1, 2)).open()
  }
}

View File

@ -2,7 +2,11 @@ package com.markilue.leecode;
import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import com.cqu.phmapiclientsdk.client.PhmApiClient;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
/**
 *@BelongsProject: Leecode
@ -15,9 +19,9 @@ import com.cqu.phmapiclientsdk.client.PhmApiClient;
public class Test1 {
    public static void main(String[] args) {
-       PhmApiClient phmApiClient = new PhmApiClient("875f0554b4e041b3ba4be50cb68eb94f", "438fdae618794937a5333cfb6856a601");
-       JSONObject jsonObject = phmApiClient.invokeByURL("http://172.28.9.61:8090/api/gas-turbine/things/search/", "{\"page_index\": 1, \"page_size\": 10, \"thing_group_uuid\":\"c1b3eb711fbd4475b262fe84fc965ea1\"}", "POST");
-       String s = JSONUtil.toJsonStr(jsonObject);
-       System.out.println(s);
+       String dateString = "20230822";
+       DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMdd");
+       LocalDate dateTime = LocalDate.parse(dateString, formatter);
+       System.out.println("Parsed LocalDateTime: " + dateTime);
    }
}
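A note on the new code: the "yyyyMMdd" pattern carries no time-of-day fields, so the string must be parsed as a LocalDate; LocalDateTime.parse would throw a DateTimeParseException here, and the print label "Parsed LocalDateTime" no longer matches the actual type. For reference, a minimal Python equivalent of the new parse (illustrative only, not part of the commit):

from datetime import datetime

date = datetime.strptime("20230822", "%Y%m%d").date()
print("Parsed date:", date)  # 2023-08-22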

View File

@ -16,7 +16,7 @@ public class Fibonaqi {
    public static void main(String[] args) {
        int n = 5;
        System.out.println(fibonacci(1, 1, 10));
-       Arrays.copyOf()
    }

    public static int fibonacci(int first, int second, int n) {
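The body of fibonacci sits outside this hunk, so its exact logic isn't shown. A plausible reading of the signature, sketched in Python under that assumption: a tail recursion that slides the (first, second) window forward until n reaches 1, so fibonacci(1, 1, 10) prints the 10th Fibonacci number.

def fibonacci(first, second, n):
    # assumed semantics: advance the (first, second) window n-1 steps
    if n <= 1:
        return first
    return fibonacci(second, first + second, n - 1)

print(fibonacci(1, 1, 10))  # 55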

View File

@ -0,0 +1,117 @@
#! -*- coding: utf-8 -*-
# Linear Recurrent Unit (LRU)
# Tested with tensorflow 1.15 + bert4keras 0.11.4

from bert4keras.layers import *


class LRU(Layer):
    """Linear Recurrent Unit
    Reference 1: https://arxiv.org/abs/2303.06349
    Reference 2: https://kexue.fm/archives/9554
    """
    def __init__(
        self,
        units,
        activation='linear',
        use_bias=True,
        unroll=True,  # unroll speeds up training but uses more GPU memory
        kernel_initializer='glorot_uniform',
        **kwargs
    ):
        super(LRU, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.unroll = unroll
        self.kernel_initializer = initializers.get(kernel_initializer)

    @integerize_shape
    def build(self, input_shape):
        super(LRU, self).build(input_shape)
        hidden_size = input_shape[-1]
        self.i_dense = Dense(
            units=self.units * 2,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.o_dense = Dense(
            units=hidden_size,
            use_bias=self.use_bias,
            activation=self.activation,
            kernel_initializer=self.kernel_initializer
        )

        def initializer(shape, dtype=None):
            # Stable-ring initialization: |lambda| lands in [r_min, r_max]
            r_min, r_max = 0.9, 0.999
            u1 = np.random.random(size=shape[1])
            u2 = np.random.random(size=shape[1])
            nu_log = np.log(
                -0.5 * np.log(u1 * (r_max**2 - r_min**2) + r_min**2)
            )
            theta_log = np.log(u2 * np.pi * 2)
            gamma_log = np.log(np.sqrt(1 - np.exp(-np.exp(nu_log))**2))
            return np.array([nu_log, theta_log, gamma_log])

        self.params_log = self.add_weight(
            name='params_log', shape=(3, self.units), initializer=initializer
        )

    @recompute_grad
    def call(self, inputs, mask=None):
        u = self.i_dense(inputs)
        params = K.exp(self.params_log)
        nu, theta, gamma = params[0], params[1], params[2]
        if self.unroll:
            L_in = K.int_shape(u)[1]
            assert L_in is not None, 'input_length cannot be None when unroll=True'
            log2_L = int(np.ceil(np.log2(L_in)))
        else:
            L_in = K.shape(u)[1]
            log2_L = K.log(K.cast(L_in, K.floatx())) / K.log(2.)
            log2_L = K.cast(tf.ceil(log2_L), 'int32')
        # Pack channel pairs into complex numbers, then pad the length
        # up to the next power of two
        u = tf.complex(u[..., ::2], u[..., 1::2])
        u = tf.pad(u, [[0, 0], [0, 2**log2_L - K.shape(u)[1]], [0, 0]])
        B, L, D = K.shape(u)[0], K.shape(u)[1], K.int_shape(u)[-1]

        def lru(i, x):
            # One doubling step: blocks of length l//2 are already scanned,
            # so carry the last state of each left block into the right block
            l = 2**i
            x = K.reshape(x, [B * L // l, l, D])
            x1, x2 = x[:, :l // 2], x[:, l // 2:]
            pos = K.arange(1, l // 2 + 1, dtype=K.floatx())
            nus = tf.einsum('n,d->nd', pos, nu)
            thetas = tf.einsum('n,d->nd', pos, theta)
            lambs = K.exp(tf.complex(-nus, thetas))
            x2 = x2 + lambs * x1[:, -1:]
            x = K.concatenate([x1, x2], axis=1)
            if (not self.unroll) and K.int_shape(u)[1] is not None:
                x = K.reshape(x, [B, L, D])
            return i + 1, x

        if self.unroll:
            x = u
            for i in range(log2_L):
                _, x = lru(i + 1, x)
        else:
            _, x = tf.while_loop(lambda i, x: i <= log2_L, lru, [1, u])
        x = x[:, :L_in] * tf.complex(gamma, 0.)
        x = K.concatenate([tf.real(x), tf.imag(x)], axis=-1)
        return self.o_dense(x)

    def get_config(self):
        config = {
            'units': self.units,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'unroll': self.unroll,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
        }
        base_config = super(LRU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
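For readers, the doubling loop in call() is a parallel evaluation of a plain linear recurrence. A naive NumPy restatement of the same semantics (a sketch; lamb stands for the layer's exp(-nu + 1j*theta) and u for its complexified i_dense output):

import numpy as np

def lru_scan_naive(u, lamb):
    # h[t] = lamb * h[t-1] + u[t], h[-1] = 0 -- exactly what lru()
    # computes in O(log L) doubling steps.
    # u: complex array (length, units); lamb: complex (units,)
    h = np.zeros_like(u[0])
    out = np.empty_like(u)
    for t in range(len(u)):
        h = lamb * h + u[t]
        out[t] = h
    return out

The layer then scales this scan by gamma, unpacks real and imaginary parts back into real channels, and projects through o_dense.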

View File

@ -0,0 +1,113 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense
from tensorflow.keras import activations, initializers
from tensorflow.keras import backend as K


class LRU(Layer):
    """Linear Recurrent Unit
    Reference 1: https://arxiv.org/abs/2303.06349
    Reference 2: https://kexue.fm/archives/9554
    """
    def __init__(
        self,
        units,
        activation='linear',
        use_bias=True,
        unroll=True,  # unroll speeds up training but uses more GPU memory
        kernel_initializer='glorot_uniform',
        **kwargs
    ):
        super(LRU, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.unroll = unroll
        self.kernel_initializer = initializers.get(kernel_initializer)

    def build(self, input_shape):
        super(LRU, self).build(input_shape)
        hidden_size = input_shape[-1]
        self.i_dense = Dense(
            units=self.units * 2,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.o_dense = Dense(
            units=hidden_size,
            use_bias=self.use_bias,
            activation=self.activation,
            kernel_initializer=self.kernel_initializer
        )

        def initializer(shape, dtype=None):
            # Stable-ring initialization: |lambda| lands in [r_min, r_max]
            r_min, r_max = 0.9, 0.999
            u1 = np.random.random(size=shape[1])
            u2 = np.random.random(size=shape[1])
            nu_log = np.log(
                -0.5 * np.log(u1 * (r_max**2 - r_min**2) + r_min**2)
            )
            theta_log = np.log(u2 * np.pi * 2)
            gamma_log = np.log(np.sqrt(1 - np.exp(-np.exp(nu_log))**2))
            return np.array([nu_log, theta_log, gamma_log])

        self.params_log = self.add_weight(
            name='params_log', shape=(3, self.units), initializer=initializer
        )

    @tf.function
    def call(self, inputs, mask=None):
        u = self.i_dense(inputs)
        params = tf.exp(self.params_log)
        nu, theta, gamma = params[0], params[1], params[2]
        if self.unroll:
            L_in = K.int_shape(u)[1]
            assert L_in is not None, 'input_length cannot be None when unroll=True'
            # L_in is a Python int here; keep log2_L a Python int too,
            # since range(log2_L) below needs one (and tf.math.log on an
            # int tensor would fail anyway)
            log2_L = int(np.ceil(np.log2(L_in)))
        else:
            L_in = tf.shape(u)[1]
            log2_L = tf.cast(
                tf.math.ceil(
                    tf.math.log(tf.cast(L_in, tf.float32)) / tf.math.log(2.)
                ), tf.int32
            )
        # Pack channel pairs into complex numbers, then pad the length
        # up to the next power of two
        u = tf.complex(u[..., ::2], u[..., 1::2])
        u = tf.pad(u, [[0, 0], [0, 2**log2_L - tf.shape(u)[1]], [0, 0]])
        B, L, D = tf.shape(u)[0], tf.shape(u)[1], tf.shape(u)[-1]

        def lru(i, x):
            # One doubling step: blocks of length l//2 are already scanned,
            # so carry the last state of each left block into the right block
            l = 2**i
            x = tf.reshape(x, [B * L // l, l, D])
            x1, x2 = x[:, :l // 2], x[:, l // 2:]
            pos = tf.range(1, l // 2 + 1, dtype=tf.float32)
            nus = tf.einsum('n,d->nd', pos, nu)
            thetas = tf.einsum('n,d->nd', pos, theta)
            lambs = tf.exp(tf.complex(-nus, thetas))
            x2 = x2 + lambs * x1[:, -1:]
            x = tf.concat([x1, x2], axis=1)
            if not self.unroll:
                # tf.while_loop needs a rank-consistent [B, L, D] carry
                x = tf.reshape(x, [B, L, D])
            return i + 1, x

        if self.unroll:
            x = u
            for i in range(log2_L):
                _, x = lru(i + 1, x)
        else:
            _, x = tf.while_loop(lambda i, x: i <= log2_L, lru, [1, u])
        x = x[:, :L_in] * tf.complex(gamma, 0.)
        x = tf.concat([tf.math.real(x), tf.math.imag(x)], axis=-1)
        return self.o_dense(x)

    def get_config(self):
        config = {
            'units': self.units,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'unroll': self.unroll,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
        }
        base_config = super(LRU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
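A quick smoke test of this tf.keras port (a sketch assuming TF 2.x; the fixed input shape satisfies the known-length requirement of unroll=True):

import tensorflow as tf

layer = LRU(units=32, unroll=True)
x = tf.random.normal([2, 64, 128])  # (batch, length, hidden_size)
y = layer(x)                        # builds the layer on first call
print(y.shape)                      # (2, 64, 128): o_dense restores hidden_size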