leetcode update
This commit is contained in:
parent a35ccc2f63
commit aa79b3d094
@@ -25,7 +25,7 @@ from pylab import *
'''
hidden_num = 10  # number of LSTM cells
feature = 10  # dimensionality of a single point
-batch_size = 32
+batch_size = 8
EPOCH = 1000
unit = 512  # the LSTM's hidden dimension
predict_num = 50  # number of points to predict
@@ -105,8 +105,6 @@ train_data.shape: (total_dims - filter_num - 1, filter_num, dims) : (570, 600, 30)
predict_data.shape: (total_dims - filter_num, filter_num) : (571, 600, 30)
train_label.shape: (total_dims - filter_num - 1, filter_num) : (570, 600)
'''
-
-
def remove(train_data, train_label, batch_size):
    epoch, _, _ = train_data.shape
    size = int(epoch / batch_size)
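The hunk above shows only the first lines of remove(). A minimal sketch of what those lines imply, assuming the helper simply drops trailing samples so the sample count divides evenly by batch_size (the keep variable is an assumption):

    def remove(train_data, train_label, batch_size):
        # Keep a whole number of batches; drop the remainder.
        epoch, _, _ = train_data.shape
        size = int(epoch / batch_size)
        keep = size * batch_size  # assumed: truncate to full batches
        return train_data[:keep], train_label[:keep]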
@@ -118,8 +116,6 @@ train_data.shape: (1230, 10, 10)
train_label.shape: (1230, 10)
train_label_single.shape: (1230,)
'''
-
-
def splitValData(data, label, label_single, predict_num=50):
    sample, hidden, feature = data.shape
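splitValData() is likewise truncated. A plausible sketch under the assumption that it holds out the last predict_num samples as the validation split, returning (train_data, train_label, val_data, val_label_single) to match the call in __main__ (the split rule itself is an assumption):

    def splitValData(data, label, label_single, predict_num=50):
        # Assumed: the trailing predict_num samples become the validation set.
        sample, hidden, feature = data.shape
        cut = sample - predict_num
        return data[:cut], label[:cut], data[cut:], label_single[cut:]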
@@ -139,7 +135,7 @@ def predict_model_multi(filter_num, dims):
    tf.config.experimental_run_functions_eagerly(True)

    input = tf.keras.Input(shape=[filter_num, dims])
-    input = tf.cast(input, tf.float32)
+    # input = tf.cast(input, tf.float32)

    #### official (built-in) Keras LSTM
    # LSTM = tf.keras.layers.LSTM(units=512, return_sequences=True)(input)
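For orientation, a minimal sketch of a builder consistent with the visible lines of predict_model_multi; everything past the Input handling (the layer stack and output head) is an assumption, since the commit shows only the first few lines:

    import tensorflow as tf

    def predict_model_multi(filter_num, dims):
        # Keras symbolic inputs default to float32, which is why the explicit
        # tf.cast is commented out in this commit.
        inp = tf.keras.Input(shape=[filter_num, dims])
        x = tf.keras.layers.LSTM(units=512, return_sequences=True)(inp)
        x = tf.keras.layers.LSTM(units=512)(x)
        out = tf.keras.layers.Dense(dims)(x)  # one value per feature of the next point
        return tf.keras.Model(inputs=inp, outputs=out)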
@@ -210,31 +206,31 @@ if __name__ == '__main__':
        train_label,
        train_label_single,
        predict_num=predict_num)
-    # #### TODO: training
-    model = predict_model_multi(hidden_num, feature)
-    checkpoint = tf.keras.callbacks.ModelCheckpoint(
-        filepath=save_name,
-        monitor='val_loss',
-        verbose=2,
-        save_best_only=True,
-        mode='min')
-    lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.001)
-
-    model.compile(optimizer=tf.optimizers.SGD(), loss=tf.losses.mse)
-    # model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.001), loss=FTMSE())
-    model.summary()
-    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=100, mode='min', verbose=1)
-
-    history = model.fit(train_data, train_label, epochs=EPOCH,
-                        batch_size=batch_size, validation_data=(val_data, val_label_single), shuffle=True, verbose=2,
-                        callbacks=[checkpoint, lr_scheduler, early_stop])
+    # # #### TODO: training
+    # model = predict_model_multi(hidden_num, feature)
+    # checkpoint = tf.keras.callbacks.ModelCheckpoint(
+    #     filepath=save_name,
+    #     monitor='val_loss',
+    #     verbose=2,
+    #     save_best_only=True,
+    #     mode='min')
+    # lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.001)
+    #
+    # model.compile(optimizer=tf.optimizers.SGD(), loss=tf.losses.mse)
+    # # model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.001), loss=FTMSE())
+    # model.summary()
+    # early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=100, mode='min', verbose=1)
+    #
+    # history = model.fit(train_data, train_label, epochs=EPOCH,
+    #                     batch_size=batch_size, validation_data=(val_data, val_label_single), shuffle=True, verbose=2,
+    #                     callbacks=[checkpoint, lr_scheduler, early_stop])

    #### TODO: testing

    # trained_model = tf.keras.models.load_model(save_name, custom_objects={'LSTMLayer': LSTMLayer, 'FTMSE': FTMSE})

    # TODO: the custom loss (FTMSE) cannot be deserialized at load time;
    # compile=False skips restoring it, and it is re-attached via compile() below.
-    trained_model = tf.keras.models.load_model(save_name, compile=False, custom_objects={'LSTMLayer': LSTMLayer, 'DCTChannelAttention': DCTChannelAttention})
+    trained_model = tf.keras.models.load_model(save_name, compile=False, custom_objects={'LSTMLayer': LSTMLayer, 'LightChannelAttention1': LightChannelAttention1})
    trained_model.compile(optimizer=tf.optimizers.SGD(), loss=FTMSE())

    # predict using the known points
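The final comment marks the start of a rolling forecast over predict_num points. A minimal sketch of the usual autoregressive pattern, assuming the model maps one (hidden_num, feature) window to the next feature-dimensional point; rolling_forecast and its window handling are illustrative, not part of the commit:

    import numpy as np

    def rolling_forecast(model, seed_window, steps):
        # seed_window: (hidden_num, feature) array of known points (assumed shape).
        window = seed_window.copy()
        preds = []
        for _ in range(steps):
            y = model.predict(window[np.newaxis, ...], verbose=0)  # (1, feature) assumed
            preds.append(y[0])
            window = np.vstack([window[1:], y])  # slide the window forward
        return np.array(preds)

For example, rolling_forecast(trained_model, known[-hidden_num:], predict_num) would extend the series by predict_num steps.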
@@ -86,7 +86,6 @@ class LightChannelAttention1(layers.Layer):
            raise ValueError('Inputs to `DynamicChannelAttention` should have rank 3. '
                             'Received input shape:', str(input_shape))

-        print(input_shape)
        # GAP
        self.GAP = tf.keras.layers.GlobalAvgPool1D()
        self.c1 = tf.keras.layers.Conv1D(filters=input_shape[2], kernel_size=1, padding='SAME')
@@ -98,13 +97,14 @@ class LightChannelAttention1(layers.Layer):

        # GAP
        GAP = self.GAP(DWC1)

+        GAP = tf.expand_dims(GAP, axis=1)  # (batch, 1, channels) so Conv1D gets rank 3
        c1 = self.c1(GAP)
        c1 = tf.keras.layers.BatchNormalization()(c1)
        s1 = tf.nn.sigmoid(c1)
        print(s1)

-        s1 = tf.broadcast_to(s1, [batch_size, length, channel])
+        s1 = tf.tile(s1, [1, length, 1])


        return s1 * inputs
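Putting the two hunks together, a self-contained sketch of the gating pattern LightChannelAttention1 implements (squeeze via GAP, 1x1 conv, BN, sigmoid, rescale). BatchNormalization is created once in build here, rather than inside call as in the commit, so its variables are tracked; applying GAP directly to inputs stands in for the DWC1 branch that the diff does not show:

    import tensorflow as tf
    from tensorflow.keras import layers

    class LightChannelAttention1(layers.Layer):
        def build(self, input_shape):
            if len(input_shape) != 3:
                raise ValueError('Inputs should have rank 3. '
                                 'Received input shape: ' + str(input_shape))
            self.GAP = tf.keras.layers.GlobalAvgPool1D()
            self.c1 = tf.keras.layers.Conv1D(filters=input_shape[2],
                                             kernel_size=1, padding='SAME')
            self.bn = tf.keras.layers.BatchNormalization()  # built once, variables tracked

        def call(self, inputs):
            length = tf.shape(inputs)[1]
            gap = self.GAP(inputs)             # (batch, channels)
            gap = tf.expand_dims(gap, axis=1)  # (batch, 1, channels) for Conv1D
            s1 = tf.nn.sigmoid(self.bn(self.c1(gap)))
            # tf.tile copes with a dynamic batch dimension; tf.broadcast_to with a
            # hard-coded batch_size (the removed line) requires trimming the data
            # to full batches first, which is what remove() is for.
            s1 = tf.tile(s1, [1, length, 1])
            return s1 * inputs

Strictly, the tile is optional: a (batch, 1, channels) gate already broadcasts against (batch, length, channels) in the final multiply.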