diff --git a/phm_rotate/PHMWarehouse/constant/__init__.py b/phm_rotate/PHMWarehouse/constant/__init__.py
new file mode 100644
index 0000000..d2be0c7
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/constant/__init__.py
@@ -0,0 +1,8 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/16 21:10
+@Usage :
+@Desc :
+'''
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/dataChange/FFTTest.py b/phm_rotate/PHMWarehouse/dataChange/FFTTest.py
new file mode 100644
index 0000000..4b8ff04
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataChange/FFTTest.py
@@ -0,0 +1,48 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/24 13:45
+@Usage :
+@Desc : FFT test on sample data
+'''
+
+import numpy as np
+import pandas as pd
+from scipy.fftpack import fft
+import matplotlib.pyplot as plt
+
+array = np.array([-0.029078494757,
+                  -0.33095228672,
+                  -0.12124221772,
+                  0.553512275219,
+                  -0.158036053181,
+                  0.268739402294,
+                  -0.638222515583,
+                  0.233140587807,
+                  -0.173265621066,
+                  0.467218101025,
+                  -0.372010827065,
+                  -0.136630430818,
+                  0.343256533146,
+                  0.008932195604])
+
+# amplitude
+print(len(array))
+print(np.fft.rfft(array))
+print(np.fft.rfft(array) * 2 / len(array))
+# amplitude
+total = np.abs(np.fft.rfft(array) * 2 / len(array))
+# phase angle
+angle = np.angle(np.fft.rfft(array) * 2 / len(array))
+# real part
+data = np.real(np.fft.rfft(array))
+# imaginary part
+array = np.imag(np.fft.rfft(array))
+
+print(total)
+print(data)
+print(array)
+plt.plot(data)
+plt.plot(array)
+plt.show()
diff --git a/phm_rotate/PHMWarehouse/dataChange/__init__.py b/phm_rotate/PHMWarehouse/dataChange/__init__.py
new file mode 100644
index 0000000..6cbe440
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataChange/__init__.py
@@ -0,0 +1,8 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/24 13:45
+@Usage :
+@Desc :
+'''
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/dataChange/crud_hdfs.py b/phm_rotate/PHMWarehouse/dataChange/crud_hdfs.py
new file mode 100644
index 0000000..09101db
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataChange/crud_hdfs.py
@@ -0,0 +1,121 @@
+import csv
+import os
+import time
+import pandas as pd
+from pyspark.sql import SparkSession
+from pyspark.sql.functions import to_timestamp
+
+
+def get_spark():
+    spark = SparkSession \
+        .builder \
+        .appName("insert_data_into_hive") \
+        .config("spark.sql.inMemoryColumnarStorage.enableVectorizedReader", "true") \
+        .config("spark.sql.execution.arrow.pyspark.enabled", "true") \
+        .config("spark.sql.session.timeZone", "Asia/Shanghai") \
+        .config('spark.driver.memory', '8g') \
+        .config('spark.executor.memory', '8g') \
+        .config('spark.executor.instances', '4') \
+        .config('spark.executor.cores', '3') \
+        .config("hive.metastore.uris", "thrift://localhost:9083") \
+        .enableHiveSupport() \
+        .getOrCreate()
+
+    spark.sparkContext.setLogLevel("ERROR")
+    return spark
+
+
+# parse local files and load them into Hive
+def load_data_to_hive():
+    spark = get_spark()
+    sensor_map = {"低速轴径向": 1,
+                  "第一级内齿圈径向": 2,
+                  "高速轴径向": 3,
+                  "非驱动端径向": 4,
+                  "驱动端径向": 5,
+                  "驱动端轴向": 6,
+                  "后轴承径向": 7,
+                  "前轴承径向": 8,
+                  }
+    batch = 500  # rows written per batch; too large a batch triggers errors
+    pick_fj = ['052', '053']  # only load these two turbines
+    base_folder = r'D:\lab\data_set\hz_osdn\苏步#2期201703\时域波形数据'
+    all_files = os.listdir(base_folder)
+    pick_files = list(filter(lambda x: x.split('_')[3] in pick_fj, all_files))
+    records = 0
+    total_times = int(len(pick_files) / batch)
+    for i in range(total_times + 1):
+        data_dict = {}
+        if i != total_times:
+            batch_files = pick_files[i * batch:(i + 1) * batch]
+        else:
+            if len(pick_files) % batch != 0:
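+                # tail batch: flush the remainder when len(pick_files) is not an exact multiple of batch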
+                batch_files = pick_files[i * batch:]
+            else:
+                return
+
+        split_names = list(map(lambda x: x.split("_"), batch_files))
+        data_dict["farm_id"] = list(map(lambda x: x[0] + "_" + x[1], split_names))
+        data_dict["turbine_id"] = list(map(lambda x: x[3], split_names))
+        data_dict["sensor_id"] = list(map(lambda x: sensor_map[x[5]], split_names))
+        data_dict["data_time"] = list(map(lambda x: x[-2], split_names))
+        data_dict["rpm"] = list(map(lambda x: float(x[-1][:-7]), split_names))
+        data_dict["frequency"] = list(map(lambda x: int(x[-4][:-2]), split_names))
+        data_dict["year_id"] = list(map(lambda x: x[-2][:4], split_names))
+        data_dict["month_id"] = list(map(lambda x: x[-2][4:6], split_names))
+        data_dict["day_id"] = list(map(lambda x: x[-2][6:8], split_names))
+
+        tmp = []
+        for file in batch_files:
+            file_path = os.path.join(base_folder, file)
+            oldf = open(file_path, 'r', encoding='UTF-8')
+            data = list(csv.reader(oldf))[0]
+            data = list(filter(lambda x: x != '', data))  # drop empty cells
+            data = [float(item) for item in data]
+            tmp.append(data)
+
+        data_dict["vib"] = tmp
+
+        tmp_df = pd.DataFrame(data_dict)
+        df = spark.createDataFrame(tmp_df)
+        df1 = df.withColumn("data_time", to_timestamp("data_time", 'yyyyMMddHHmmss'))
+        spark.sql("set hive.exec.dynamic.partition=true")
+        spark.sql("set hive.exec.dynamic.partition.mode = nonstrict")
+        df1.repartition(5).write \
+            .format("Hive") \
+            .mode('append') \
+            .option("header", "false") \
+            .option("delimiter", "\t") \
+            .saveAsTable("flask_data.sds", partitionBy=["year_id", "month_id", "day_id"])
+
+        records += len(batch_files)
+        print("uploaded {}/{} records".format(records, len(pick_files)))
+        # mode is either 'append' or 'overwrite': 'overwrite' drops and recreates the table,
+        # while 'append' adds rows to an existing table.
+        # If the table already exists, format must match the format it was created with
+        # (orc, parquet, ...); if that format is unknown, "Hive" lets Spark pick it up automatically.
+
+
+# load_data_to_hive()
+
+
+def ods_to_dwd():
+    spark = get_spark()
+
+
+def dwd_to_dws():
+    pass
+
+
+def query_dwd(start_time, end_time=None):
+    pass
+
+
+def query_dws():
+    pass
+
+
+def query_ada():
+    pass
+
+
+def query_dim():
+    pass
diff --git a/phm_rotate/PHMWarehouse/dataMap/GasDimDataInsert.py b/phm_rotate/PHMWarehouse/dataMap/GasDimDataInsert.py
new file mode 100644
index 0000000..b45f920
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataMap/GasDimDataInsert.py
@@ -0,0 +1,102 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/20 16:32
+@Usage :
+@Desc : Insert random rows into the dimension tables
+'''
+
+import pymysql
+import pandas as pd
+from pymysql.connections import Connection
+import os
+import random
+import shutil
+import math
+
+root = r"E:\面试准备\interview-prepare\个人总结\04大数据\09项目\大型旋转机组健康管理系统软件\数仓\数仓建设\配置类数据库建立\20230327092414/"
+root_file = os.path.join(root, "data_csv\\")
+
+if not os.path.exists(root_file):
+    os.makedirs(root_file)
+
+
+def nameChange():
+    file_name = os.listdir(root)
+    typeIdList = set()
+    for file in file_name:
+        if file.endswith(".xlsx"):
+            final_name = file.split(".")[0] + ".csv"
+            typeIdList.add(file.split("_")[1])
+            srcfile = os.path.join(root, file)
+            print(srcfile)
+            ex = pd.read_excel(srcfile)
+            tarfile = os.path.join(root_file, final_name)
+            ex.to_csv(tarfile, encoding="utf-8")
+
+            # shutil.copy(srcfile, tarfile)
+            # print("copy %s -> %s" % (srcfile, tarfile))
+
+
+def loadData():
+    file_name = os.listdir(root_file)
+    typeIdList = set()
+    for file in file_name:
+        read_name = os.path.join(root_file + file)
+        # print(read_name)
+        data = pd.read_csv(read_name, encoding='utf-8')
+        for name in data['ORIGINAL_TAG']:
+            if type(name) is str and len(name) > 5:
+                # print(name.split("].")[0])
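+                # assumed tag shape "[plant.unit.TYPE].TagName": take the text before "]."
+                # and read its third dot-separated field as the type id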
+                typeIdList.add(name.split("].")[0].split(".")[2])
+                break
+        # print(data)
+    print(typeIdList)
+    return typeIdList
+
+    pass
+
+
+def getConnection():
+    # open the database connection; re-raise on failure so callers never see an unbound conn
+    try:
+        conn = pymysql.connect(host='Ding202', user='root', passwd='123456', port=3306, database='rt_phm_table')
+        print('connected!')
+    except Exception as e:
+        print('connection failed:', e)
+        raise
+    return conn
+
+
+def insert_type_info(conn: Connection, if_clear=True):
+    typeIdList = loadData()
+    usersvalues = []
+    for typeId in typeIdList:
+        usersvalues.append(
+            (typeId, random.randint(1000, 2000), random.randint(40000, 50000), random.randint(10000, 20000),
+             random.randint(100, 200), random.randint(100, 300), random.randint(100, 500), random.randint(100, 500)))
+    cs = conn.cursor()  # get a cursor
+    if if_clear:
+        cs.execute("truncate table gas_type_info")
+    # executemany, not execute: one round trip for the whole batch
+    cs.executemany(
+        "insert into gas_type_info values(%s,%s,%s,%s,%s,%s,%s,%s)",
+        usersvalues)
+
+    conn.commit()
+    cs.close()
+    conn.close()
+    print('OK')
+
+
+def insert_data():
+    conn = getConnection()
+
+    insert_type_info(conn)
+    pass
+
+
+if __name__ == '__main__':
+    # nameChange()
+    # loadData()
+    insert_data()
diff --git a/phm_rotate/PHMWarehouse/dataMap/WindDimDataInsert.py b/phm_rotate/PHMWarehouse/dataMap/WindDimDataInsert.py
new file mode 100644
index 0000000..ba1627f
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataMap/WindDimDataInsert.py
@@ -0,0 +1,128 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/20 16:32
+@Usage :
+@Desc : Insert random rows into the dimension tables
+'''
+
+import pymysql
+import pandas as pd
+from pymysql.connections import Connection
+import os
+import random
+import shutil
+import math
+
+root = r"E:\面试准备\interview-prepare\个人总结\04大数据\09项目\大型旋转机组健康管理系统软件\数仓\数仓建设\配置类数据库建立\20230327092414/"
+root_file = os.path.join(root, "data_csv\\")
+
+if not os.path.exists(root_file):
+    os.makedirs(root_file)
+
+
+def createWindTypeInfo():
+    windTypeInfo = []
+
+    for wt_no in range(25):
+        windTypeInfo.append(("jingbian_siqi_" + str(wt_no), "jingbian_siqi", random.randint(0, 4)))
+        windTypeInfo.append(("huanengsantanghu_" + str(wt_no), "huanengsantanghu", random.randint(0, 4)))
+        windTypeInfo.append(("bandacheng_" + str(wt_no), "bandacheng", random.randint(0, 4)))
+    return windTypeInfo
+    pass
+
+
+def createWindTypeDetail():
+    windTypeDetail = []
+    for type_id in range(5):
+        windTypeDetail.append(
+            (type_id, random.randint(1000, 2000), random.randint(20000, 50000), random.randint(5000, 10000),
+             random.randint(1000, 5000), random.randint(10000, 50000), random.randint(70, 100))
+        )
+    return windTypeDetail
+    pass
+
+
+def createLocationInfo():
+    locationInfo = []
+
+    locationInfo.append(
+        ("jingbian_siqi", "靖边四期", 100.65, 82.46, 20000, 20)
+    )
+
+    locationInfo.append(
+        ("huanengsantanghu", "华能三塘湖", 110.65, 67.46, 20000, 30)
+    )
+
+    locationInfo.append(
+        ("bandacheng", "扳达城", 60.65, 103.46, 16000, 10)
+    )
+
+    return locationInfo
+    pass
+
+
+def getConnection():
+    # open the database connection; re-raise on failure so callers never see an unbound conn
+    try:
+        conn = pymysql.connect(host='Ding202', user='root', passwd='123456', port=3306, database='rt_phm_table')
+        print('connected!')
+    except Exception as e:
+        print('connection failed:', e)
+        raise
+    return conn
+
+
+def insert_type_detail(conn: Connection, if_clear=True):
+    windTypeDetail = createWindTypeDetail()
+    cs = conn.cursor()  # get a cursor
+    if if_clear:
+        cs.execute("truncate table wind_type_detail")
+    cs.executemany("insert into wind_type_detail values(%s,%s,%s,%s,%s,%s,%s)", windTypeDetail)
+    conn.commit()
+    cs.close()
+    # the connection is closed by the caller, so the other insert_* helpers can reuse it
+    print('OK')
+    pass
+
+
+def insert_location(conn: Connection, if_clear=True):
+    locationInfo = createLocationInfo()
+    cs = conn.cursor()  # get a cursor
+    if if_clear:
+        cs.execute("truncate table wind_location_info")
+    cs.executemany("insert into wind_location_info values(%s,%s,%s,%s,%s,%s)", locationInfo)
+    conn.commit()
+    cs.close()
+    print('OK')
+    pass
+
+
+def insert_type_info(conn: Connection, if_clear=True):
+
+    windType_info = createWindTypeInfo()
+    cs = conn.cursor()  # get a cursor
+    if if_clear:
+        cs.execute("truncate table wind_type_info")
+
+    # executemany, not execute: one round trip for the whole batch
+    cs.executemany("insert into wind_type_info values(%s,%s,%s)", windType_info)
+
+    conn.commit()
+    cs.close()
+    print('OK')
+
+
+def insert_data():
+    conn = getConnection()
+    insert_type_info(conn)
+    insert_location(conn)
+    insert_type_detail(conn)
+    conn.close()
+    pass
+
+
+if __name__ == '__main__':
+    # nameChange()
+    # loadData()
+    # insert_data()
+    conn = getConnection()
+    insert_location(conn)
+    conn.close()
diff --git a/phm_rotate/PHMWarehouse/dataMap/__init__.py b/phm_rotate/PHMWarehouse/dataMap/__init__.py
new file mode 100644
index 0000000..9c60885
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataMap/__init__.py
@@ -0,0 +1,8 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/17 21:43
+@Usage :
+@Desc :
+'''
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/dataMap/dataMapToMysql.py b/phm_rotate/PHMWarehouse/dataMap/dataMapToMysql.py
new file mode 100644
index 0000000..a69b1da
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataMap/dataMapToMysql.py
@@ -0,0 +1,58 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/17 21:43
+@Usage :
+@Desc : Load the TagMapping file into MySQL
+'''
+import pymysql
+import pandas as pd
+from pymysql.connections import Connection
+
+data_path = r"E:\面试准备\interview-prepare\个人总结\04大数据\09项目\大型旋转机组健康管理系统软件\数仓\数仓建设\配置类数据库建立\20230327092414\TagMapping_PL193CEPB_C.csv"
+data_df = pd.read_csv(data_path, encoding='gbk')
+
+data_df = data_df.fillna("")
+
+
+def getConnection():
+    # open the database connection; re-raise on failure so callers never see an unbound conn
+    try:
+        conn = pymysql.connect(host='Ding202', user='root', passwd='123456', port=3306, database='rt_phm')
+        print('connected!')
+    except Exception as e:
+        print('connection failed:', e)
+        raise
+    return conn
+
+
+def add_test_users(conn: Connection):
+    usersvalues = []
+    for data in data_df.values:
+        tag = str(data[1])
+        if len(tag) > 0:
+            data[1] = tag.split("].")[1]
+        usersvalues.append((data[0], data[1], data[2], data[4], data[5], data[10], 0, ""))
+
+    cs = conn.cursor()  # get a cursor
+    # executemany, not execute: one round trip for the whole batch
+    cs.executemany("insert into gas_code_process(decode_value,encode_value,tag_desc,unit,scale_factor,standard_unit,is_calulated,extend_info) values(%s,%s,%s,%s,%s,%s,%s,%s)", usersvalues)
+
+    conn.commit()
+    cs.close()
+    conn.close()
+    print('OK')
+
+
+def insert_data():
+    conn = getConnection()
+
+    add_test_users(conn)
+    pass
+
+
+def load_data():
+    insert_data()
+    pass
+
+
+if __name__ == '__main__':
+    insert_data()
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/dataMap/format.json b/phm_rotate/PHMWarehouse/dataMap/format.json
new file mode 100644
index 0000000..e69de29
diff --git a/phm_rotate/PHMWarehouse/dataMap/mysqlDemo.py b/phm_rotate/PHMWarehouse/dataMap/mysqlDemo.py
new file mode 100644
index 0000000..e4ed1fe
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataMap/mysqlDemo.py
@@ -0,0 +1,84 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/17 21:45
+@Usage :
+@Desc : MySQL usage demo
+'''
+'''
+References:
+[1] https://zhuanlan.zhihu.com/p/397765212
+'''
+
+import pymysql
+import time
+
+
+def test_db():
+    # open the database connection; re-raise on failure so callers never see an unbound db
+    try:
+        db = pymysql.connect(host='Ding202', user='root', passwd='123456', port=3306, database='rt_phm')
+        print('connected!')
+    except Exception as e:
+        print('connection failed:', e)
+        raise
+
+    # cursor() creates a cursor object
+    cursor = db.cursor()
+
+    # execute() runs the SQL query
+    cursor.execute("SELECT * from gas_table_process")
+
+    # fetch the whole result set
+    results = cursor.fetchall()
+    for row in results:
+        fname = row[0]
+        lname = row[1]
+        age = row[2]
+        sex = row[3]
+        income = row[4]
+        # print the row
+        print('query succeeded!')
+        print("fname=%s,lname=%s,age=%s,sex=%s,income=%s" % \
+              (fname, lname, age, sex, income))
+
+    # fetchone() would fetch a single row instead
+    # data = cursor.fetchone()
+
+    # print("Database version : %s " % data)
+
+    # close the database connection
+    db.close()
+
+
+# decorator that times inserting 50000 rows
+def timer(func):
+    def decor(*args):
+        start_time = time.time()
+        func(*args)
+        end_time = time.time()
+        d_time = end_time - start_time
+        print("the running time is : ", d_time)
+
+    return decor
+
+
+# batch insert
+@timer
+def add_test_users():
+    usersvalues = []
+    for num in range(1, 50000):
+        usersvalues.append((num, "11", num))  # each row is a tuple inside the list
+
+    conn = pymysql.connect(host='Ding202', port=3306, user='root', password='123456', database='rt_phm', charset='utf8')
+    cs = conn.cursor()  # get a cursor
+    # executemany, not execute: one round trip for the whole batch
+    cs.executemany("insert into test(age,name,id) values(%s,%s,%s)", usersvalues)
+
+    conn.commit()
+    cs.close()
+    conn.close()
+    print('OK')
+
+
+if __name__ == '__main__':
+    add_test_users()
diff --git a/phm_rotate/PHMWarehouse/dataShow/__init__.py b/phm_rotate/PHMWarehouse/dataShow/__init__.py
new file mode 100644
index 0000000..7c1bdf7
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataShow/__init__.py
@@ -0,0 +1,8 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/6/1 15:15
+@Usage :
+@Desc :
+'''
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/dataSouce/GasTurnbine.py b/phm_rotate/PHMWarehouse/dataSouce/GasTurnbine.py
new file mode 100644
index 0000000..fe58869
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataSouce/GasTurnbine.py
@@ -0,0 +1,104 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/17 15:42
+@Usage : Read and replay gas-turbine data
+@Desc : Read and replay gas-turbine data
+'''
+import numpy as np
+import pandas as pd
+from datetime import datetime
+import sys
+from apscheduler.schedulers.blocking import BlockingScheduler
+import pytz
+from utils.KafkaUtils import *
+
+data_path = r"E:\data\Historian(1)\historian_data_demo\2023_01_02.npy"
+total_data = np.load(data_path)
+
+
+data_sorted = total_data[total_data[:, 2].argsort()]
+now_time = datetime.now()
+index = 0
+
+print(total_data)
+print(data_sorted)
+
+
+def sent_data():
+    try:
+        global index
+        single_data = total_data[index, :]
+        # print(single_data)
+        data = {
+            "tagName": single_data[0],
+            "value": single_data[1],
+            "realtime": single_data[2],
+            "time": now_time.strftime("%Y%m%d%H%M%S")
+        }
+        data_send = json.dumps(data).encode("gbk")
+        # send one message to the ods topic
+        producer = getKafkaProducer()
+        producer.send(
+            'ods_base_data',
+            value=data_send
+        )
+        # print("send {}".format(
+        #     data_send))
+        # check whether the send succeeded
+        index = index + 1
+    except Exception as e:
+        try:
+            traceback.print_exc(file=sys.stdout)
+        except Exception as ex:
+            print(ex)
+    finally:
+        index = min(len(total_data) - 1, index)  # clamp so the next read stays in range
+        # index = index % 5000
+        print(data)
+        print("end send data")
+    pass
+
+
+def sent_data_sorted():
+    try:
+        global index
+        single_data = data_sorted[index, :]
+        # print(single_data)
+        data = {
+            "tagName": single_data[0],
+            "value": single_data[1],
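+            # assumed .npy row layout: column 0 = tag name, 1 = value, 2 = source timestamp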
"realtime": single_data[2], + "time": now_time.strftime("%Y%m%d%H%M%S") + } + data_send = json.dumps(data).encode("gbk") + # 发送三条消息 + producer = getKafkaProducer() + producer.send( + 'ods_base_data', + value=data_send + ) # 向分区1发送消息 + # print("send {}".format( + # data_send)) + # 监控是否发送成功 + index = index + 1 + except Exception as e: + try: + traceback.print_exc(file=sys.stdout) + except Exception as ex: + print(ex) + finally: + index = min(len(total_data),index) + # index = index % 5000 + print(data) + print("end send data") + pass + pass + + + +# if __name__ == '__main__': +# timez = pytz.timezone('Asia/Shanghai') +# scheduler = BlockingScheduler(timezone=timez) +# scheduler.add_job(sent_data_sorted, 'interval', seconds=0.5, next_run_time=datetime.now(), max_instances=10) # 定时发送 +# scheduler.start() \ No newline at end of file diff --git a/phm_rotate/PHMWarehouse/dataSouce/KafkaDemo.py b/phm_rotate/PHMWarehouse/dataSouce/KafkaDemo.py new file mode 100644 index 0000000..da32b8e --- /dev/null +++ b/phm_rotate/PHMWarehouse/dataSouce/KafkaDemo.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- + +''' +@Author : dingjiawen +@Date : 2023/5/16 19:51 +@Usage : +@Desc : 一些使用kafka的demo +''' + +''' +参考: +[1] https://blog.csdn.net/fenglepeng/article/details/109454576 +[2] https://zhuanlan.zhihu.com/p/279784873 +''' + +from kafka import KafkaProducer, KafkaConsumer +from kafka.errors import kafka_errors +import traceback +import json + +# TODO KafkaProducer(**configs):参数 +''' +bootstrap_servers –'host[:port]'字符串,或者由'host[:port]'组成的字符串,形如['10.202.24.5:9096', '10.202.24.6:9096', '10.202.24.7:9096']),其中,host为broker(Broker:缓存代理,Kafka集群中的单台服务器)地址,默认值为 localhost, port默认值为9092,这里可以不用填写所有broker的host和port,但必须保证至少有一个broker) +key_serializer (可调用对象) –用于转换用户提供的key值为字节,必须返回字节数据。 如果为None,则等同调用f(key)。 默认值: None. +value_serializer(可调用对象) – 用于转换用户提供的value消息值为字节,必须返回字节数据。 如果为None,则等同调用f(value)。 默认值: None. +''' +# TODO TODO send(topic, value=None, key=None, headers=None, partition=None, timestamp_ms=None) +''' +函数返回FutureRecordMetadata类型的RecordMetadata数据 + +topic(str) – 设置消息将要发布到的主题,即消息所属主题 +value(可选) – 消息内容,必须为字节数据,或者通过value_serializer序列化后的字节数据。如果为None,则key必填,消息等同于“删除”。( If value is None, key is required and message acts as a ‘delete’) +partition (int, 可选) – 指定分区。如果未设置,则使用配置的partitioner +key (可选) – 和消息对应的key,可用于决定消息发送到哪个分区。如果partition为None,则相同key的消息会被发布到相同分区(但是如果key为None,则随机选取分区)(If partition is None (and producer’s partitioner config is left as default), then messages with the same key will be delivered to the same partition (but if key is None, partition is chosen randomly)). 必须为字节数据或者通过配置的key_serializer序列化后的字节数据. 
+headers (optional) – message headers: a list of header-value pairs, each a tuple of the form (str_header, bytes_value)
+timestamp_ms (int, optional) – milliseconds since epoch (Jan 1 1970 UTC) to use as the message timestamp. Defaults to the current time.
+'''
+
+
+def producer_demo():
+    # messages here are key-value pairs (keys are optional), serialized as json
+    producer = KafkaProducer(
+        bootstrap_servers=['192.168.118.202:9092'],
+        key_serializer=lambda v: json.dumps(v).encode('utf-8'),
+        value_serializer=lambda v: json.dumps(v).encode('utf-8'),
+    )
+    #
+    # producer = KafkaProducer(
+    #     bootstrap_servers=['192.168.118.202:9092'],
+    #     key_serializer=str.encode('utf-8'),
+    #     value_serializer=str.encode('utf-8'),
+    # )
+
+    # producer = KafkaProducer(
+    #     bootstrap_servers=['192.168.118.202:9092']
+    # )
+    # send three messages
+    for i in range(0, 3):
+        data = {"status": 0, "msg": "", "data": [{"name": "海门", "value": 1}, {"name": "鄂尔多斯", "value": 1}]}
+        print(json.dumps(data))
+        print(type(json.dumps(data)))
+        # send_data = json.dumps(data).encode('GBK')
+        data_send = json.dumps(data)
+        future = producer.send(
+            'pykafka_demo',
+            value=data_send
+        )
+        print("send {}".format(
+            data_send))
+        try:
+            future.get(timeout=10)  # block until the send is acknowledged
+        except KafkaError:  # a failed send raises a KafkaError
+            traceback.format_exc()
+
+
+# TODO class kafka.KafkaConsumer(*topics, **configs)
+'''
+*topics (str) – optional list of topics to subscribe to. If unset, call subscribe or assign before consuming records.
+bootstrap_servers – a 'host[:port]' string, or a list of 'host[:port]' strings, e.g. ['10.202.24.5:9096', '10.202.24.6:9096', '10.202.24.7:9096']; host defaults to localhost, port to 9092. It must contain at least one reachable broker.
+client_id (str) – client name, default: 'kafka-python-{version}'
+group_id (str or None) – consumer group name. If None, partitions are auto-assigned via the group coordinator and offset commits are disabled. Default: None.
+auto_offset_reset (str) – offset reset policy: 'earliest' moves to the oldest available message, 'latest' to the most recent. Any other value raises an exception. Default: 'latest'.
+enable_auto_commit (bool) – if True, the consumer's offset is committed periodically in the background. Default: True.
+auto_commit_interval_ms (int) – interval in milliseconds between automatic offset commits when enable_auto_commit is True. Default: 5000.
+value_deserializer (callable) – takes the raw message value and returns the deserialized value
+consumer_timeout_ms – timeout in milliseconds; if unset, iteration blocks forever waiting for messages, otherwise it stops after this timeout
+max_poll_interval_ms – maximum delay in milliseconds between poll calls; if exceeded while heartbeats are still being sent, the consumer is considered livelocked and a rebalance is triggered
+session_timeout_ms – heartbeat session timeout in milliseconds. In a distributed system a missing heartbeat may mean the peer is dead or merely overloaded or stuck behind a congested network, so a timeout is agreed on after which the peer is declared dead.
+heartbeat_interval_ms – how often heartbeats are sent, in milliseconds; a higher frequency lowers the chance of false positives but costs more resources.
+max_poll_records (int) – maximum number of messages returned per poll
+'''
+
+
+def consumer_demo():
+    consumer = KafkaConsumer(
+        'aaa',
+        bootstrap_servers=['192.168.118.202:9092'],
+        group_id='test'
+    )
+    for message in consumer:
+        message1 = str(message.value, encoding="utf-8")
+        print(message1)
+        print(type(message1))
+        # print("receive, key: {}, value: {}".format(
+        #     json.loads(message.key),
+        #     json.loads(message.value)
+        # )
+        # )
+    pass
+
+
+if __name__ == '__main__':
+    consumer_demo()
+    # producer_demo()
diff --git a/phm_rotate/PHMWarehouse/dataSouce/WindTurnbine.py b/phm_rotate/PHMWarehouse/dataSouce/WindTurnbine.py
new file mode 100644
index 0000000..60220da
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataSouce/WindTurnbine.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/16 19:46
+@Usage : Send wind-turbine data to Kafka
+@Desc : Read and replay wind-turbine gearbox data
+'''
+import pandas as pd
+from datetime import datetime
+import numpy as np
+import sys
+from apscheduler.schedulers.blocking import BlockingScheduler
+import pytz
+from utils.KafkaUtils import *
+import os
+
+scada_data_path = r"G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
+scada_data_df = pd.read_csv(scada_data_path, encoding='gbk')
+
+cms_send_data = []
+cms_total_send_data = []
+cms_total_data = []
+
+index = 0
+indexCMS = 0
+cmsInterval = 10000
+i = 0
+now_time = datetime.now()
+
+root = r"E:\data\cmsDemo/"
+
+
+def loadData():
+    file_name = os.listdir(root)
+    dataList = []
+    for file in file_name:
+        if file.startswith("华能三塘湖"):
+            read_name = os.path.join(root + file)
+            # print(read_name)
+            data = pd.read_csv(read_name, encoding='utf-8', header=None)
+            total_data = data.values[0]
+            name = file.split("_")
+            data = {
+                "windfarm": name[0],
+                "wt_no": 5,
+                "realtime": file.split(".csv")[0].split("_")[-1],
+                "location": name[2] + "_" + name[3],
+                "g": 3,
+                "RPM": 14.41,
+                "freq": name[4],
+                "x": [],
+                "time": now_time.strftime("%Y%m%d%H%M%S")
+            }
+            dataList.append((data, total_data))
+    print("load cms data is completed")
+    global cms_send_data
+    cms_send_data = dataList
+
+    pass
+
+
+def sent_SCADA_data():
+    try:
+        global index
+        data = scada_data_df.iloc[index].to_json()
+        data = json.loads(data)
+        data["time"] = now_time.strftime("%Y%m%d%H%M%S")
+        # print(type(data))
+        data_send = json.dumps(data).encode("gbk")
+        # send one message to the ods topic
+        producer = getKafkaProducer()
+        producer.send(
+            'ods_base_data',
+            value=data_send
+        )
+        print("send {}".format(
+            data_send))
+        # check whether the send succeeded
+        index = index + 1
+    except Exception as e:
+        try:
+            traceback.print_exc(file=sys.stdout)
+        except Exception as ex:
+            print(ex)
+    finally:
+        # index = index % 5000
+        print(data)
+        print("end send data")
+
+
+def sent_CMS_data():
+    try:
+        global indexCMS
+        global i
+        limit = indexCMS + cmsInterval
+        if limit >= len(cms_total_data[i]):
+            # either finish the remainder of this file or move on to the next one:
+            if limit == len(cms_total_data[i]) + cmsInterval:
+                # the previous call drained this file; move to the next one
+                indexCMS = 0
+                i += 1
+                limit = indexCMS + cmsInterval
+            else:
+                # send the remainder of the current file
+                limit = len(cms_total_data[i])
+        # print(i)
+        cms_sent_data = cms_total_send_data[i]
+
+        cms_sent_data["x"] = list(cms_total_data[i][indexCMS:limit])
+        data = cms_sent_data
+        data_send = json.dumps(data).encode("gbk")
+        # send one message to the ods topic
+        producer = getKafkaProducer()
+        producer.send(
+            'ods_base_data',
+            value=data_send
+        )
+
+        # check whether the send succeeded
+        indexCMS = limit
+    except Exception as e:
+        try:
+            traceback.print_exc(file=sys.stdout)
+        except Exception as ex:
+            print(ex)
+    finally:
+        # indexCMS = indexCMS % 5000
+        print("data", data)
+        print("end send data")
+    pass
+
+
+def send_cms_data_before():
+    for send_data, total_data in cms_send_data:
+        global cms_total_send_data
+        global cms_total_data
+        cms_total_send_data.append(send_data)
+        cms_total_data.append(total_data)
+
+
+if __name__ == '__main__':
+    loadData()
+    send_cms_data_before()
+    timez = pytz.timezone('Asia/Shanghai')
+    scheduler = BlockingScheduler(timezone=timez)
+    scheduler.add_job(sent_CMS_data, 'interval', seconds=0.5, next_run_time=datetime.now(), max_instances=10)  # scheduled replay
+    # scheduler.add_job(sent_SCADA_data, 'interval', seconds=0.5, next_run_time=datetime.now(), max_instances=10)  # scheduled replay
+    scheduler.start()
diff --git a/phm_rotate/PHMWarehouse/dataSouce/__init__.py b/phm_rotate/PHMWarehouse/dataSouce/__init__.py
new file mode 100644
index 0000000..fa96282
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataSouce/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/16 19:45
+@Usage :
+@Desc :
+'''
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/dataSouce/test.py b/phm_rotate/PHMWarehouse/dataSouce/test.py
new file mode 100644
index 0000000..6be73d6
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/dataSouce/test.py
@@ -0,0 +1,110 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/16 21:51
+@Usage :
+@Desc : scratch tests
+'''
+
+import numpy as np
+import pandas as pd
+from datetime import datetime
+import json
+import os
+
+#### TODO test reading the CMS data
+
+root = r"E:\data\cmsDemo/"
+now_time = datetime.now()
+
+
+def loadData():
+    file_name = os.listdir(root)
+    dataList = []
+    for file in file_name:
+        if file.startswith("华能三塘湖"):
+            read_name = os.path.join(root + file)
+            # print(read_name)
+            data = pd.read_csv(read_name, encoding='utf-8', header=None)
+            total_data = data.values[0]
+            name = file.split("_")
+            data = {
+                "windfarm": name[0],
+                "wt_no": 5,
+                "realtime": file.split(".")[0].split("_")[-1],
+                "location": name[2] + "_" + name[3],
+                "g": 3,
+                "RPM": 14.41,
+                "freq": name[4],
+                "x": [],
+                "time": now_time.strftime("%Y%m%d%H%M%S")
+            }
+            dataList.append((data, total_data))
+
+    return dataList
+
+    pass
+
+
+dataList = loadData()
+
+print(dataList)
+
+cms_data_path = r"E:\data\cmsDemo\华能三塘湖项目(一期)_D3-29_Shaft2_径向_25600_加速度g_14.41RPM_20180618003450.csv"
+cms_data_df = pd.read_csv(cms_data_path, encoding='gbk', header=None)
+
+
+x = list(cms_data_df)
+
+send_data = list(cms_data_df.values[0])
+print(send_data)
+print(type(send_data))
+
+data = {
+    "windfarm": "华能三塘湖(一期)",
+    "wt_no": 5,
+    "realtime": "20180618003450",
+    "location": "shaft_径向",
+    "g": 3,
+    "RPM": 14.41,
+    "freq": 25600,
+    "x": send_data[:3],
+    "time": now_time.strftime("%Y%m%d%H%M%S")
+}
+
+print(data)
+# print(type(data))
+data_send = json.dumps(data).encode("gbk")
+print(data_send)
+
+
+#### TODO test reading the gas-turbine data
+# now_time = datetime.now()
+# data_path = r"E:\data\Historian(1)\historian_data_demo\2023_01_02.npy"
+# total_data = np.load(data_path)
+# print(total_data)
+# print(len(total_data))
+#
+# # # row labels
+# rows = np.unique(total_data[:, 2])
+# rows = np.insert(rows, 0, "Time")
+# # sort ascending by the timestamp column (index 2)
+# data_sorted = total_data[total_data[:, 2].argsort()]
+# # column labels
+# columns = np.unique(data_sorted[:, 0])
+# # the base array to extend later
+# values = rows[:, np.newaxis]
+# print("==================")
+# print(data_sorted)
+
+
+
+# single_data = total_data[1, :]
+# print(single_data)
+# data = {
+#     "tagName": single_data[0],
+#     "value": single_data[1],
+#     "realtime": single_data[2],
+#     "time": now_time.strftime("%Y%m%d%H%M%S")
+# }
+
+# print(data)
diff --git a/phm_rotate/PHMWarehouse/modelConditionMonitoring/CNN-LSTM.py b/phm_rotate/PHMWarehouse/modelConditionMonitoring/CNN-LSTM.py
new file mode 100644
index 0000000..15566b0
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/modelConditionMonitoring/CNN-LSTM.py
@@ -0,0 +1,139 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/29 16:00
+@Usage : Wind-turbine condition monitoring
+@Desc : Read dwm_wind_wide records from Kafka and run condition monitoring with a CNN-LSTM model.
+        Open issue: when records arrive one at a time, models that need a time dimension (LSTM, GRU, ...)
+        cannot be fed, so only AE-style models can run for now.
+'''
+
+import tensorflow as tf
+import numpy as np
+import pandas as pd
+from utils.KafkaUtils import *
+from keras.callbacks import EarlyStopping
+import json
+
+# hyperparameters
+timestamp = 100
+feature_num = 10
+learning_rate = 0.01
+EPOCH = 100
+batch_size = 32
+model_name = "AE"
+save_name = "./model/{0}_timestamp{1}_feature_num{2}_batchSize{3}_epoch{4}.h5".format(model_name, timestamp,
+                                                                                      feature_num, batch_size, EPOCH)
+
+
+def condition_monitoring_model():
+    input = tf.keras.Input(shape=[timestamp, feature_num])
+    conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
+    GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
+    d1 = tf.keras.layers.Dense(300)(GRU1)
+    output = tf.keras.layers.Dense(10)(d1)
+    model = tf.keras.Model(inputs=input, outputs=output)
+
+    return model
+
+
+def condition_monitoring_model_AE():
+    input = tf.keras.Input(shape=[feature_num])
+    conv1 = tf.keras.layers.Dense(feature_num // 2)(input)
+    GRU1 = tf.keras.layers.Dense(3)(conv1)
+    d1 = tf.keras.layers.Dense(feature_num // 2)(GRU1)
+    output = tf.keras.layers.Dense(feature_num)(d1)
+    model = tf.keras.Model(inputs=input, outputs=output)
+
+    return model
+
+
+# load the training data
+def read_data():
+    data = pd.read_csv("G:\data\SCADA数据/jb4q_8_delete_all_zero_simple.csv")
+    data = data.iloc[:10000, 3:13]
+    data = np.array(data)
+    return data
+    pass
+
+
+# overlapping sliding-window sampling
+def get_training_data_overlapping(data, time_stamp=timestamp):
+    rows, cols = data.shape
+    train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
+    train_label = np.empty(shape=[rows - time_stamp - 1, cols])
+    for i in range(rows):
+        if i + time_stamp >= rows:
+            break
+        if i + time_stamp < rows - 1:
+            train_data[i] = data[i:i + time_stamp]
+            train_label[i] = data[i + time_stamp]
+    print("after overlapping sampling:")
+    print("data:", train_data)
+    print("label:", train_label)
+
+    return train_data, train_label
+
+
+# min-max normalization
+def normalization(data):
+    rows, cols = data.shape
+    print("before normalization:", data)
+    print(data.shape)
+    print("======================")
+
+    max = np.max(data, axis=0)
+    max = np.broadcast_to(max, [rows, cols])
+    min = np.min(data, axis=0)
+    min = np.broadcast_to(min, [rows, cols])
+
+    data = (data - min) / (max - min)
+    print("after normalization:", data)
+    print(data.shape)
+
+    return data
+
+
+def kafkaSource():
+    pass
+
+
+if __name__ == '__main__':
+
+    # ## TODO training
+    # data = read_data()
+    # data = normalization(data=data)
+    # # train_data, train_label = get_training_data_overlapping(data)
+    # model = condition_monitoring_model_AE()
+    # checkpoint = tf.keras.callbacks.ModelCheckpoint(
+    #     filepath=save_name,
+    #     monitor='val_loss',
+    #     verbose=1,
+    #     save_best_only=True,
+    #     mode='min',
+    #     period=1)
+    # lr_scheduler = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.0001)
+    # early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=30, mode='min', verbose=1)
+    # model.compile(optimizer=tf.optimizers.Adam(learning_rate=learning_rate), loss=tf.losses.mse)
+    # model.summary()
+    # model.fit(data, data, batch_size=batch_size, epochs=EPOCH, validation_split=0.1,
+    #           callbacks=[checkpoint, lr_scheduler, early_stop])
+
+    ### TODO testing
+
+    consumer = getKafkaConsumer("dwm_wind_wide", group_id="wind_condition_monitoring_group")
+    model = tf.keras.models.load_model(save_name)
+    for message in consumer:
+        record = str(message.value, encoding="utf-8")
+        recordDict = json.loads(record)
+        print("parsed json:", record)
+        data = np.array([recordDict['num_gearbox_sumptemp'], recordDict['num_gearbox_inletoiltemp'], recordDict['num_gearbox_inletpress'],
+                         recordDict['num_gearbox_pumpoutletpress'],
+                         recordDict['num_gearbox_coolingwatertemp'], recordDict['num_envtemp'],
+                         recordDict['num_gearbox_highspeedshaft_front_temp'], recordDict['num_gearbox_highspeedshaft_rear_temp'],
+                         recordDict['num_gen_max_windingtemp'], recordDict['num_engineroom_temp']])
+        data = np.expand_dims(data, axis=0)
+        # data = normalization(data)
+        # data = np.expand_dims(data,axis=0)
+        predicted_data = model.predict(data)
+        print("actual:", data)
+        print("predicted:", predicted_data)
+        # model.predict(message)
diff --git a/phm_rotate/PHMWarehouse/modelConditionMonitoring/__init__.py b/phm_rotate/PHMWarehouse/modelConditionMonitoring/__init__.py
new file mode 100644
index 0000000..6899347
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/modelConditionMonitoring/__init__.py
@@ -0,0 +1,8 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/29 16:00
+@Usage :
+@Desc :
+'''
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/modelConditionMonitoring/test.py b/phm_rotate/PHMWarehouse/modelConditionMonitoring/test.py
new file mode 100644
index 0000000..648b71d
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/modelConditionMonitoring/test.py
@@ -0,0 +1,26 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/29 16:18
+@Usage :
+@Desc : scratch tests for model training
+'''
+import pandas as pd
+import numpy as np
+import json
+# data = pd.read_csv("G:\data\SCADA数据/jb4q_8_delete_all_zero_simple.csv")
+# data = data.iloc[:10000,3:13]
+# data = np.array(data)
+# print(data)
+
+record = '{"num_gen_torque":36.32,"num_gearbox_sumptemp":50.035297,"num_gearbox_inletoiltemp":47.73672,"production":"20000","latitude":"82.46","degree_winddirection":172.95117,"num_envtemp":23.717888,"num_gen_speed":1516.0,"rated_efficiency":"96","rated_load":"26521","num_gearbox_pumpoutletpress":5.004181,"rated_speed":"1731","num_engineroom_temp":32.029892,"num_rotorspeed":11.877784,"num_activepower":678.0,"num_engineroom_vibration_x":0.007827997,"rated_air_volume":"9397","num_engineroom_vibration_y":-0.0063325884,"num_gearbox_highspeedshaft_front_temp":60.51593,"num_gen_max_windingtemp":78.60004,"num_gearbox_highspeedshaft_rear_temp":62.51814,"longitude":"100.65","wt_no":8,"realtime":"2018-07-30 00:00:00","num_windspeed":7.318091,"rated_power":"1090","type_id":"3","rated_press":"16036","num_gearbox_coolingwatertemp":42.06111,"num_gearbox_inletpress":2.621296,"location":"靖边四期","time":"20230529172114","windfarm":"jingbian_siqi"}'
+
+jsonData = json.loads(record)
+data = np.array([jsonData['num_gearbox_sumptemp'], jsonData['num_gearbox_inletoiltemp'], jsonData['num_gearbox_inletpress'],
+                 jsonData['num_gearbox_pumpoutletpress'],
+                 jsonData['num_gearbox_coolingwatertemp'], jsonData['num_envtemp'],
+                 jsonData['num_gearbox_highspeedshaft_front_temp'], jsonData['num_gearbox_highspeedshaft_rear_temp'],
+                 jsonData['num_gen_max_windingtemp'], jsonData['num_engineroom_temp']])
+print(data)
+print(data.shape)
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/utils/KafkaUtils.py b/phm_rotate/PHMWarehouse/utils/KafkaUtils.py
new file mode 100644
index 0000000..65717b6
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/utils/KafkaUtils.py
@@ -0,0 +1,59 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/16 20:15
+@Usage : Get Kafka producers and consumers, publish and subscribe to messages
+@Desc : Kafka helper utilities
+'''
+
+from kafka import KafkaProducer, KafkaConsumer
+from kafka.errors import KafkaError
+import traceback
+import json
+
+KAFKA_SERVER = ["Ding202:9092", "Ding203:9092", "Ding204:9092"]
+
+
+def getKafkaProducer(key_serializer=None, value_serializer=None):
+    # messages may be key-value pairs (keys are optional), serialized as json
+    # producer = KafkaProducer(
+    #     bootstrap_servers=['192.168.118.202:9092'],
+    #     key_serializer=lambda v: json.dumps(v).encode('utf-8'),
+    #     value_serializer=lambda v: json.dumps(v).encode('utf-8'),
+    # )
+
+    producer = KafkaProducer(
+        bootstrap_servers=KAFKA_SERVER,
+        key_serializer=key_serializer,
+        value_serializer=value_serializer
+    )
+
+    return producer
+
+
+def getKafkaProducerTest(key_serializer=None, value_serializer=None):
+    # messages may be key-value pairs (keys are optional), serialized as json
+    # producer = KafkaProducer(
+    #     bootstrap_servers=['192.168.118.202:9092'],
+    #     key_serializer=lambda v: json.dumps(v).encode('utf-8'),
+    #     value_serializer=lambda v: json.dumps(v).encode('utf-8'),
+    # )
+
+    producer = KafkaProducer(
+        bootstrap_servers=KAFKA_SERVER,
+        key_serializer=key_serializer,
+        value_serializer=value_serializer
+    )
+
+    return producer
+
+
+def getKafkaConsumer(topic, group_id, key_serializer=None, value_serializer=None):
+    consumer = KafkaConsumer(
+        topic,
+        group_id=group_id,
+        bootstrap_servers=KAFKA_SERVER,
+        auto_offset_reset='latest'
+    )
+
+    return consumer
diff --git a/phm_rotate/PHMWarehouse/utils/LTTBUtils.py b/phm_rotate/PHMWarehouse/utils/LTTBUtils.py
new file mode 100644
index 0000000..2da6035
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/utils/LTTBUtils.py
@@ -0,0 +1,39 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/22 20:33
+@Usage :
+@Desc : LTTB downsampling: preserves the visual detail of time-series fluctuations
+'''
+
+import numpy as np
+import pandas as pd
+import lttb
+
+
+def lttb_try(a):
+    length, = a.shape
+    print("length:", length)
+    b = np.array([range(length), a]).T
+    print(b)
+    print(b.shape)
+    return lttb.downsample(b, n_out=int(length / 10))
+
+
+if __name__ == '__main__':
+    cms_data_path = r"E:\data\cmsDemo\华能三塘湖项目(一期)_D3-29_Shaft2_径向_25600_加速度g_14.41RPM_20180618003450.csv"
+    cms_data_df = pd.read_csv(cms_data_path, encoding='gbk', header=None)
+    cms_data_df = cms_data_df.values[0]
+    data_before = cms_data_df[:1000]
+    down_sample = lttb_try(data_before)
+    print(down_sample.shape)
+
+    import matplotlib.pyplot as plt
+
+    plt.subplot(2, 1, 1)
+    plt.plot(data_before)
+    plt.subplot(2, 1, 2)
+    plt.plot(down_sample[:, 1])
+    plt.show()
+
+    # assert small_data.shape == (20, 2)
+
+    # print(cms_send_data.shape)
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/utils/RedisUtils.py b/phm_rotate/PHMWarehouse/utils/RedisUtils.py
new file mode 100644
index 0000000..487bc73
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/utils/RedisUtils.py
@@ -0,0 +1,48 @@
+# -*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/6/1 15:14
+@Usage :
+@Desc : Redis helper / demo
+'''
+
+'''
+References:
+[1] https://zhuanlan.zhihu.com/p/374381314
+'''
+
+import redis
+import numpy as np
+import json
+import matplotlib.pyplot as plt
+import pandas as pd
+
+# connection-pool variant: redisPool = redis.ConnectionPool(host='Ding202', port=6379)
+
+redisClient = redis.Redis(host='Ding202', port=6379, decode_responses=True, charset='UTF-8', encoding='UTF-8')
+read_name = r'E:\data\cmsDemo\华能三塘湖项目(一期)_D3-29_Shaft2_径向_25600_加速度g_14.41RPM_20180618003450.csv'
+cms = redisClient.get(
+    r'华能三塘湖项目(一期):5:cmsLTTB')
+# cms = np.array(cms)
+cms = json.loads(cms)
+x = cms.get("x")
+print(cms)
+print(x)
+
+data = pd.read_csv(read_name, encoding='utf-8', header=None)
+total_data = data.values[0]
+
+plt.subplot(2, 1, 1)
+plt.plot(x)
+
+plt.xlabel("X")
+plt.ylabel("Y")
+
+plt.subplot(2, 1, 2)
+plt.plot(total_data)
+plt.xlabel("X")
+plt.ylabel("Y")
+
+plt.show()
diff --git a/phm_rotate/PHMWarehouse/utils/__init__.py b/phm_rotate/PHMWarehouse/utils/__init__.py
new file mode 100644
index 0000000..ddae7d8
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/utils/__init__.py
@@ -0,0 +1,8 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/16 20:15
+@Usage :
+@Desc :
+'''
\ No newline at end of file
diff --git a/phm_rotate/PHMWarehouse/utils/qttpy.py b/phm_rotate/PHMWarehouse/utils/qttpy.py
new file mode 100644
index 0000000..a823221
--- /dev/null
+++ b/phm_rotate/PHMWarehouse/utils/qttpy.py
@@ -0,0 +1,341 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2023/5/22 20:41
+@Usage :
+@Desc :
+'''
+
+# -*- coding: utf-8 -*-
+import numpy as np
+import math
+import matplotlib.pyplot as plt
+import pandas as pd
+
+
+'''Python operator overloads
+Method        Overloads             Called for
+__init__      constructor           X = Class()
+__del__       destructor            object destruction
+__add__       radd, iadd, +         X + Y, X += Y
+__or__        |                     X | Y, X |= Y
+__repr__      print conversion      print X, repr(X)
+__str__       print conversion      print X, str(X)
+__call__      call                  X()
+__getattr__   attribute fallback    X.undefined
+__setattr__   attribute assignment  X.any = value
+__getitem__   indexing              X[key]
+__len__       length                len(X)
+__cmp__       comparison            X == Y, X < Y
+'''
+
+# self.core=a
+# d=len(a)
+# r=[a[0].shape[0]]
+# subshapes=[]
+# for i in range(d):
+#     r=r+[a[i].shape[1]]
+#     subshapes=subshapes+[a[i].subshape]
+# subshape=np.array(subshapes)
+# self.d=d
+# self.r=r
+# self.subshapes=subshapes
+# return
+# shape=a.shape
+
+
+
+if __name__ == '__main__':
+    cms_data_path = r"E:\data\cmsDemo\华能三塘湖项目(一期)_D3-29_Shaft2_径向_25600_加速度g_14.41RPM_20180618003450.csv"
+    cms_data_df = pd.read_csv(cms_data_path, encoding='gbk', header=None)
+    cms_data_df = cms_data_df.values[0]
+    print(cms_data_df.shape)
+    ltcore = lqtt(cms_data_df, (cms_data_df.shape[0] / 10, 1))
+    print(ltcore)
diff --git a/webSetup/raw/coding-websites.yml b/webSetup/raw/coding-websites.yml
new file mode 100644
index 0000000..440e64f
--- /dev/null
+++ b/webSetup/raw/coding-websites.yml
@@ -0,0 +1,48 @@
+- name: Github
+  url: https://github.com/
+  img: /images/logos/github.png
+  description: 全球最大的同性交友平台
+- name: Gitee
+  url: https://gitee.com/
+  img: /images/logos/gitee.ico
+  description: 面向开源及私有软件项目的托管平台
+- name: Open Github
+  url: https://open.itc.cn/
+  img: /images/logos/opengithub.png
+  description: 分享最新鲜、最热门、最流行 Github 开源项目
+- name: HelloGithub
+  url: https://hellogithub.com/
+  img: /images/logos/hellogithub.png
+  description: 分享 GitHub 上有趣、入门级的开源项目
+- name: 腾讯云
+  url: https://cloud.tencent.com/
+  img: /images/logos/cloudtencent.ico
+  description: 产业智变,云启未来
+- name: 阿里云
+  url: https://www.aliyun.com/
+  img: /images/logos/aliyun-logo.png
+  description: 为了无法计算的价值
+- name: 雨云
+  url: https://www.rainyun.com/
+  img: /images/logos/rainyun.png
+  description: 新一代云服务器提供商
+- name: 稀土掘金
+  url: https://juejin.cn/
+  img: /images/logos/juejin.ico
+  description: 字节旗下技术分享社区
+- name: CSDN
+  url: https://www.csdn.net/
+  img: /images/logos/csdn.ico
+  description: 专业开发者社区
+- name: stackoverflow
+  url: https://stackoverflow.com/
+  img: /images/logos/stackoverflow.ico
+  description: 国外的社区
+- name: mvn repo
+  url: https://mvnrepository.com/
+  img: /images/logos/mvn.png
+  description: maven仓库
+- name: Docker Hub
+  url: https://hub.docker.com/
+  img: /images/logos/dockerhub.ico
+  description: Docker Hub
\ No newline at end of file
diff --git a/webSetup/raw/good-tools.yml b/webSetup/raw/good-tools.yml
new file mode 100644
index 0000000..e91cb0a
--- /dev/null
+++ b/webSetup/raw/good-tools.yml
@@ -0,0 +1,64 @@
+- name: 谷歌翻译
+  url: https://translate.google.com/
+  img: /images/logos/googletranslate.ico
+  description: 翻了墙用这个
+- name: 有道翻译
+  url: https://fanyi.youdao.com/
+  img: /images/logos/youdao.webp
+  description: 没翻墙用这个
+- name: ChatGPT
+  url: https://chat.openai.com/
+  img: /images/logos/openai.ico
+  description: 啥都可以问
+- name: POE
+  url: https://poe.com/
+  img: /images/logos/poe.ico
+  description: 香港节点可以访问
+- name: ico51
+  url: https://www.ico51.cn/
+  img: /images/logos/ico.ico
+  description: 制作透明ico
+- name: CoverView
+  url: https://coverview.vercel.app/
+  img: /images/logos/coverview.png
+  description: 博客文章封面制作神器
+- name: tiny png
+  url: https://tinypng.com/
+  img: /images/logos/tinypng.png
+  description: 在线图片压缩
+- name: tutieshi
+  url: https://www.tutieshi.com/
+  img: /images/logos/tutieshi.ico
+  description: 一个免费的视频格式转换以及图片压缩网站
+- name: itool
+  url: https://itool.co/
+  img: /images/logos/itool.ico
+  description: 各种好用工具
+- name: matools
+  url: https://www.matools.com/
+  img: /images/logos/matools.png
+  description: 程序员好用的工具
+- name: wormhole
+  url: https://wormhole.app/
+  img: /images/logos/wormhole.ico
+  description: 好用的在线文件传输工具
+- name: draw io
+  url: https://app.diagrams.net/
+  img: /images/logos/drawio.ico
+  description: 画图工具,流程图、类图等各种图
+- name: hotbox
+  url: https://www.hotbox.fun/
+  img: /images/logos/hotbox.ico
+  description: 一个完全免费的视频下载网站
+- name: 超能搜
+  url: https://www.chaonengsou.com/
+  img: /images/logos/chaonengsou.ico
+  description: 一个资源搜索类网站,提供了十几个搜索工具
+- name: ZLibrary
+  url: https://zh.z-library.se/
+  img: /images/logos/zlibrary.ico
+  description: 全球最大的科学文章库。70,000,000+ 免费文章,下书神器
+- name: Libgen
+  url: https://libgen.rocks/
+  img: /images/logos/libgen.ico
+  description: 一个全网最大的网上图书馆网站,提供了国内外的电子版书籍
\ No newline at end of file
diff --git a/webSetup/raw/paper-search.yml b/webSetup/raw/paper-search.yml
new file mode 100644
index 0000000..796cdff
--- /dev/null
+++ b/webSetup/raw/paper-search.yml
@@ -0,0 +1,16 @@
+- name: webofscience
+  url: https://webofscience.clarivate.cn/wos/alldb/basic-search
+  img: /images/logos/webofscience.ico
+  description: 连校园网才能免费下载
+- name: paperswithcode
+  url: https://paperswithcode.com/
+  img: /images/logos/paperswithcode.ico
+  description: 找有代码的论文来这里
+- name: 中国知网
+  url: https://www.cnki.net/
+  img: /images/logos/cnki.ico
+  description: 知网是什么?
+- name: 熊猫学术
+  url: https://panda985.com/
+  img: /images/logos/panda985.png
+  description: 啥论文都能搜
\ No newline at end of file
diff --git a/webSetup/raw/personal-blog.yml b/webSetup/raw/personal-blog.yml
new file mode 100644
index 0000000..3288d05
--- /dev/null
+++ b/webSetup/raw/personal-blog.yml
@@ -0,0 +1,20 @@
+- name: Miykahの后备箱
+  url: https://blog.miykah.top/
+  img: /images/avatar.webp
+  description: 一个懒人的博客
+- name: Blog Console
+  url: https://blog.miykah.top/console
+  img: /images/avatar.webp
+  description: 博客后台系统
+- name: Umami
+  url: https://umami.miykah.top/
+  img: /images/logos/umami.ico
+  description: Umami - 自建网站访问流量统计分析
+- name: 思源笔记-知识库
+  url: https://note.miykah.top/
+  img: /images/logos/siyuan.png
+  description: 自托管思源笔记,个人知识库管理
+- name: Miykah's Wiki
+  url: https://wiki.miykah.top/
+  img: /images/logos/wiki.png
+  description: 基于 mkdocs-material 的个人知识库
\ No newline at end of file
diff --git a/webSetup/raw/personal-server.yml b/webSetup/raw/personal-server.yml
new file mode 100644
index 0000000..8ec5412
--- /dev/null
+++ b/webSetup/raw/personal-server.yml
@@ -0,0 +1,16 @@
+- name: 1Panel面板
+  url: https://panel.miykah.top/miykah
+  img: /images/logos/1Panel.png
+  description: 1Panel 是一个现代化、开源的 Linux 服务器运维管理面板
+- name: Portainer
+  url: https://docker.miykah.top/
+  img: /images/logos/docker.png
+  description: Docker 后台可视化管理
+- name: 站点监控
+  url: https://status.miykah.top/
+  img: /images/logos/status.ico
+  description: 监控自己站点运行状态(Uptime-Kuma)
+- name: glances 服务器监控
+  url: https://monitor.miykah.top/
+  img: /images/logos/glances.png
+  description: 全面的 Linux 系统监控工具
diff --git a/webSetup/raw/personal-tools.yml b/webSetup/raw/personal-tools.yml
new file mode 100644
index 0000000..27939db
--- /dev/null
+++ b/webSetup/raw/personal-tools.yml
@@ -0,0 +1,48 @@
+- name: AList 个人云盘
+  url: https://cloud.miykah.top/
+  img: /images/logos/alist.svg
+  description: 基于开源软件 AList 搭建的个人云盘
+- name: Pingvin Share
+  url: https://share.miykah.top/
+  img: /images/logos/pingvin-share.png
+  description: 自建文件共享平台
+- name: Memos
+  url: https://memos.miykah.top/
+  img: /images/logos/memos.png
+  description: 轻量级、自托管的备忘录中心
+- name: Gitea
+  url: https://gitea.miykah.top/
+  img: /images/logos/gitea.png
+  description: Gitea 是一个轻量级的自托管Git服务,用于在Linux服务器上搭建私有的Git存储库和代码托管平台
+- name: GPT 学术优化
+  url: https://academic.gpt.miykah.top/
+  img: /images/logos/gpt-academic.png
+  description: 用 GPT 来翻译、优化、润色、分析代码
+- name: Daily Hot
+  url: https://hot.miykah.top/
+  img: /images/logos/hot.png
+  description: 汇聚全网热点,热门尽览无余
+- name: Huntly
+  url: https://huntly.miykah.top/
+  img: /images/logos/huntly.ico
+  description: RSS 聚合阅读器,收录自己常看媒体
+- name: ChatGPT Next Web
+  url: https://chat.miykah.top/
+  img: /images/logos/chat.ico
+  description: 基于开源项目ChatGPT Next Web搭建自己的ChatGPT
+- name: 思维导图
+  url: https://mindmap.miykah.top/
+  img: /images/logos/mindmap.png
+  description: 思维导图是一种图形化工具,用树状结构和关键词展示信息,帮助组织思维、显示关系和提高效率
+- name: drawio
+  url: https://drawio.miykah.top/
+  img: /images/logos/drawio.ico
+  description: 自己部署的画图工具,流程图、类图等各种图
+- name: it-tools 自己部署的 it 工具箱
+  url: https://itools.miykah.top/
+  img: /images/logos/it-tools.png
+  description: IT-Tools是一款开源的个人工具箱,专为IT从业人员打造
+- name: Linux命令大全
+  url: https://linux-command.miykah.top/
+  img: /images/logos/linux-command.jpg
+  description: 这是一个 Linux 命令大全,可以搜索查看 Linux 命令及使用方法示例
\ No newline at end of file
diff --git a/webSetup/raw/popular-websites.yml b/webSetup/raw/popular-websites.yml
new file mode 100644
index 0000000..d8f1d20
--- /dev/null
+++ b/webSetup/raw/popular-websites.yml
@@ -0,0 +1,32 @@
+- name: BiliBili
+  url: https://www.bilibili.com/
+  img: /images/logos/bilibili.ico
+  description: 哔哩哔哩,干杯
+- name: 斗鱼
+  url: https://www.douyu.com/
+  img: /images/logos/douyu.ico
+  description: 每个人的直播平台
+- name: 虎牙
+  url: https://www.huya.com/
+  img: /images/logos/huya.ico
+  description: 老虎的牙齿
+- name: 知乎
+  url: https://www.zhihu.com/
+  img: /images/logos/zhihu.ico
+  description: 人在知乎,刚下飞机
+- name: 虎扑
+  url: https://www.hupu.com/
+  img: /images/logos/hupu.ico
+  description: JRS
+- name: 百度贴吧
+  url: https://tieba.baidu.com/
+  img: /images/logos/tieba.ico
+  description: 虾头♂聚集地
+- name: 新浪微博
+  url: https://weibo.com/
+  img: /images/logos/weibo.ico
+  description: 就看个热搜
+- name: 做饭指南
+  url: https://cook.aiurs.co/
+  img: /images/logos/cook.png
+  description: 程序员做饭指南
\ No newline at end of file
diff --git a/webSetup/raw/study-websites.yml b/webSetup/raw/study-websites.yml
new file mode 100644
index 0000000..93cee0b
--- /dev/null
+++ b/webSetup/raw/study-websites.yml
@@ -0,0 +1,12 @@
+- name: LeetCode
+  url: https://leetcode.cn/
+  img: /images/logos/leetcode.ico
+  description: 谁还不刷算法题?
+- name: 牛客网
+  url: https://www.nowcoder.com/
+  img: /images/logos/nowcoder.ico
+  description: 求职论坛
+- name: 语雀
+  url: https://www.yuque.com/dashboard
+  img: /images/logos/yuque.png
+  description: 记笔记很好用!
\ No newline at end of file
diff --git a/webSetup/raw/webstack.yml b/webSetup/raw/webstack.yml
new file mode 100644
index 0000000..fe1c85b
--- /dev/null
+++ b/webSetup/raw/webstack.yml
@@ -0,0 +1,117 @@
+favicon: /images/favicon.png
+banner: /images/logo.png
+
+logo:
+  expanded: /images/logo.png
+  collapsed: /images/avatar.webp
+  dark: /images/logo-about.png
+
+flag:
+  - name: Chinese
+    default: true
+    icon: flag-cn
+    index: /
+
+search: true
+
+userDefinedSearchData:
+  custom: false
+  thisSearch: https://www.baidu.com/s?wd=
+  thisSearchIcon: url(https://www.baidu.com/favicon.ico)
+  hotStatus: true
+  data:
+    - name: 百度
+      img: url(https://www.baidu.com/favicon.ico)
+      url: https://www.baidu.com/s?wd=
+
+githubCorner: ''
+
+expandAll: true
+menu:
+  - name: 常用网站
+    icon: far fa-star
+    config: popular-websites
+  - name: 个人站
+    icon: fas fa-user
+    submenu:
+      - name: 个人博客
+        icon: fas fa-blog
+        config: personal-blog
+      - name: 个人服务器
+        icon: fas fa-server
+        config: personal-server
+      - name: 个人工具
+        icon: fas fa-tools
+        config: personal-tools
+  - name: 学习网站
+    icon: fas fa-book
+    config: study-websites
+  - name: 编程相关
+    icon: fas fa-code
+    config: coding-websites
+  - name: 好用工具
+    icon: fas fa-tools
+    config: good-tools
+  - name: 文献查阅
+    icon: fas fa-edit
+    config: paper-search
+
+about:
+  url: /about/
+  icon: far fa-heart
+  name: 关于本站
+
+aboutPage:
+  website:
+    head: 关于本站
+    html: '

本站汇总个人常用网站,作为自己的一个导航网站。

+'
+  webmaster:
+    head: 关于站长
+    name: Miykahの后备箱
+    url: https://blog.miykah.top/
+    img: /images/avatar.webp
+    description: 懒
+
+since: 2023
+
+busuanzi:
+  enable: true
+  position: sidebar # 'footer','sidebar'
+  pv: 本站总访问量$pv
+  uv: 本站总访客数$uv
+
+custom:
+  head: |- # inserted inside the <head> tag; multiple lines allowed, each line indented at least four spaces
+
+
+  body: |- # inserted just before the closing </body> tag; multiple lines allowed, each line indented at least four spaces
+
+
+
+
+js:
+  header: /js/header.js
+  footer: /js/footer.js
+  jquery: /js/jquery-1.11.1.min.js
+  bootstrap: /js/bootstrap.min.js
+  TweenMax: /js/TweenMax.min.js
+  resizeable: /js/resizeable.min.js
+  joinable: /js/joinable.js
+  xenonApi: /js/xenon-api.min.js
+  xenonToggles: /js/xenon-toggles.min.js
+  xenonCustom: /js/xenon-custom.min.js
+  lozad: /js/lozad.min.js
+  html5shiv: /js/html5shiv.min.js
+  respond: /js/respond.min.js
+  busuanzi: https://busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js
+
+css:
+  hclonely: /css/hclonely.css
+  fonts: //fonts.loli.net/css?family=Arimo:400,700,400italic
+  linecons: /css/fonts/linecons/css/linecons.min.css
+  fontawesome: /css/fonts/fontawesome/css/all.min.css
+  bootstrap: /css/bootstrap.min.css
+  xenonCore: /css/xenon-core.min.css
+  xenonComponents: /css/xenon-components.min.css
+  xenonSkins: /css/xenon-skins.min.css
+  nav: /css/nav.min.css
diff --git a/webSetup/tools.json b/webSetup/tools.json
new file mode 100644
index 0000000..9d5e682
--- /dev/null
+++ b/webSetup/tools.json
@@ -0,0 +1 @@
+[{"id":1,"name":"chatgpt","url":"http://119.91.214.52:20002","logo":"http://119.91.214.52:20002/favicon.ico","catelog":"常用","desc":"自己搭建的GPT","sort":1,"hide":false}]
\ No newline at end of file
diff --git a/webSetup/编程导航格式转换.py b/webSetup/编程导航格式转换.py
new file mode 100644
index 0000000..f02c458
--- /dev/null
+++ b/webSetup/编程导航格式转换.py
@@ -0,0 +1,8 @@
+#-*- encoding:utf-8 -*-
+
+'''
+@Author : dingjiawen
+@Date : 2024/03/20 15:30
+@Usage :
+@Desc : Convert programming-nav entries between formats
+'''
\ No newline at end of file
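+# A minimal sketch of the conversion this stub is named for, under assumptions:
+# it maps webSetup/tools.json entries (name/url/logo/desc) onto the
+# name/url/img/description items used by the raw/*.yml nav files. The file
+# names, field mapping, and use of PyYAML are assumptions, not the author's code.
+import json
+
+import yaml  # assumes PyYAML is installed
+
+
+def tools_json_to_yml(src="tools.json", dst="raw/converted-tools.yml"):
+    with open(src, encoding="utf-8") as f:
+        tools = json.load(f)
+    # skip hidden entries and translate the field names
+    entries = [{"name": t["name"], "url": t["url"],
+                "img": t.get("logo", ""), "description": t.get("desc", "")}
+               for t in tools if not t.get("hide", False)]
+    with open(dst, "w", encoding="utf-8") as f:
+        yaml.safe_dump(entries, f, allow_unicode=True, sort_keys=False)
+
+
+if __name__ == '__main__':
+    tools_json_to_yml()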