Commit 6294e3eef0 by liyuyi@c-top.com.cn, 3 years ago

+ 17 - 3
config/config.yaml

@@ -1,13 +1,27 @@
 source_name_map:
   1:
-      name: '内部'
+      name: '内部创意'
       table: 'ctop_ai_video_info_from_huichuang'
+      status: 1
   2:
-      name: '头条巨量'
+      name: '巨量创意'
       table: 'ctop_ai_video_info_from_ocean_engine'
+      status: 1
   3:
-      name: '快手开眼'
+      name: '开眼快创'
       table: 'ctop_ai_material_info_from_kuaishou_kaiyan'
+      status: 1
+
+mac_ip_config:
+  # development environment
+  '5254003fa716':
+      url: "http://139.186.165.84:31013/"
+  # test environment
+  '52540003f5dd':
+      url: "http://139.186.27.96:31013/"
+  # production environment
+  '525400c98142':
+      url: "http://114.117.193.186:31013/"
 
 
 # jeecg-boot production database
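The new per-source status flag lets a crawl source be switched off without code changes: the scheduled task in time_task/get_material_and_script_by_query_word.py (below) only runs sources whose status is 1. A minimal sketch of that gating, assuming the layout above:

    import yaml

    with open('config/config.yaml', mode='r', encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)

    for source_code, source in config['source_name_map'].items():
        if source['status'] == 1:  # only enabled sources get crawled
            print(source_code, source['name'], source['table'])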

+ 8 - 4
config/url.py

@@ -1,14 +1,18 @@
 import uuid
+import yaml
+
+
+with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', encoding='utf-8') as f:
+    config = yaml.load(f.read(), Loader=yaml.FullLoader)
+    mac_ip_config = config['mac_ip_config']
 
 mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
-debug_url = "http://139.186.165.84:31013/"
-product_url = "http://139.186.27.96:31013/"
 
 # url for submitting a speech-to-script task to Tencent Cloud
-voice_to_script_task_submit_url = (debug_url if mac == '5254003fa716' else product_url) + 'asr/task/submit'
+voice_to_script_task_submit_url = mac_ip_config[mac]['url'] + 'asr/task/submit'
 
 # fetch the transcribed script from Tencent Cloud by task_id
-voice_to_script_task_result_url = (debug_url if mac == '5254003fa716' else product_url) + 'asr/task/result'
+voice_to_script_task_result_url = mac_ip_config[mac]['url'] + 'asr/task/result'
 
 # fetch high-quality material info from Ocean Engine
 get_material_info_from_ocean_engine_url = "https://cc.oceanengine.com/creative_radar_api/v1/material/list"
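Note that mac_ip_config[mac] raises a bare KeyError at import time on any host whose MAC address is not listed in config.yaml. A minimal guard, as a suggestion (the fallback behavior and message are mine, not part of this commit):

    base_url = mac_ip_config.get(mac, {}).get('url')
    if base_url is None:
        raise RuntimeError(f"MAC {mac} has no entry in mac_ip_config; add it to config/config.yaml")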

+ 15 - 3
get_material_and_script/get_material_from_huichuang.py

@@ -1,9 +1,8 @@
 import datetime
 from datetime import timedelta
-
 import pandas as pd
-
 from common_func import mysql_replace_into
+from get_material_and_script.get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 
 
 class GetMaterialFromHuiChuang(object):
@@ -20,6 +19,7 @@ class GetMaterialFromHuiChuang(object):
         # 1 get account_id by query word (Kuaishou materials only)
         # `media_id`: platform type, 1 = Toutiao, 2 = Kuaishou
         # `account_status`: 0 = enabled, 1 = disabled
+        self.logger.info(f"query word: {self.query_word}, fetching the company's in-house high-quality materials, start...")
         sql = f"select distinct(account_id) account_id from ctop_user_allocation where project_name like '%%{self.query_word}%%' " \
               f"and media_id = 2"
         account_id_lst = pd.read_sql(sql, self.jeecg_boot_db_engine).account_id.values
@@ -35,7 +35,8 @@ class GetMaterialFromHuiChuang(object):
                 sql = f"select signature, activation , charge " \
                       f"from ctop_kuaishou_report_daily_material  " \
                       f"where account_id in {tuple(list(account_id_lst) * 2) if account_id_lst.size == 1 else tuple(account_id_lst)} " \
-                      f"and stat_date = '{stat_date}'"
+                      f"and stat_date = '{stat_date}'" \
+                      f"and signature is not null"
                 one_date_df = pd.read_sql(sql, self.jeecg_boot_db_engine)
                 full_date_df = full_date_df.append(one_date_df)
 
@@ -56,6 +57,8 @@ class GetMaterialFromHuiChuang(object):
         if not self.high_material_df.empty and not self.video_url_df.empty:
             self.video_df = self.high_material_df.merge(self.video_url_df, on='signature', how='inner')
 
+        self.logger.info(f"查询词:{self.query_word}, 获取公司内部优质素材,共获取到{len(self.video_df)}个")
+
         # 5 save the fetched info to the database
         if not self.video_df.empty:
             self.video_df['query_word'] = self.query_word
@@ -67,3 +70,12 @@ class GetMaterialFromHuiChuang(object):
                                  index=False,
                                  chunksize=1000,
                                  method=mysql_replace_into)
+
+    def get_script_from_teng_xun_yun(self):
+        get_script_ins = GetScriptFromTengXunYunServer(logger=self.logger,
+                                                       db_engine=self.ai_word_db_engine,
+                                                       task_df=self.video_df[['signature', 'video_url']],
+                                                       task_ids=None)
+        get_script_ins.submit_task()
+        if get_script_ins.task_ids:
+            get_script_ins.get_result()
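The tuple(list(account_id_lst) * 2) if account_id_lst.size == 1 else tuple(account_id_lst) pattern above (and in several files below) works around Python's one-element tuple rendering, whose trailing comma is not valid SQL after IN:

    ids = ['abc']
    str(tuple(ids))      # "('abc',)"  -> "where x in ('abc',)" is a MySQL syntax error
    str(tuple(ids * 2))  # "('abc', 'abc')" -> valid SQL; IN de-duplicates, so the result set is unchanged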

+ 19 - 7
get_material_and_script/get_material_from_kuaishou_kaiyan.py

@@ -1,10 +1,13 @@
 import datetime
 import json
 import traceback
+
 import pandas as pd
 import requests
+
 from common_func import NpEncoder, get_db_engine, get_logger, mysql_replace_into
 from config.url import get_video_info_from_kuaishou_kaiyan
+from get_material_and_script.get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 
 
 class GetMaterialFromKuaishouKaiyan(object):
@@ -22,10 +25,10 @@ class GetMaterialFromKuaishouKaiyan(object):
         page_num = 1
         page_size = 10
         total_size = 0
-        self.logger.info("查询词:%s, 调用快手开眼快创视频接口,开始执行-------------------- " % self.query_word)
+        self.logger.info(f"查询词:{self.query_word}, 调用快手开眼快创视频接口,开始执行...")
         while True:
             # fetch page by page
-            self.logger.info("query word: %s, calling the Kuaishou Kaiyan video API, fetching page %s" % (self.query_word, page_num))
+            self.logger.info(f"query word: {self.query_word}, calling the Kuaishou Kaiyan video API, fetching page {page_num}")
             request_data = {"inspiredSortTypeId": 0,
                             "platformSourceId": 0,
                             "formatId": 0,
@@ -55,17 +58,17 @@ class GetMaterialFromKuaishouKaiyan(object):
                 else:
                     page_num += 1
             except:
-                self.logger.error("查询词:%s, 调用快手开眼快创视频接口, 获取第%s页时发生异常信息: %s" %
-                                  (self.query_word, page_num, traceback.format_exc()))
+                self.logger.error(f"查询词:{self.query_word}, 调用快手开眼快创视频接口,获取第{page_num}页时发生异常信息:{traceback.format_exc()}")
 
-        self.logger.info("查询词:%s, 调用快手开眼快创视频接口,结束执行,共 %s 个视频信息!-------------------- " % (self.query_word, total_size))
+        self.logger.info(f"查询词:{self.query_word}, 调用快手开眼快创视频接口,结束执行,共 {total_size} 个视频!")
 
         # 2 save the fetched info to the database
         # 2-1 convert field types for easier insertion
         # mainMvUrls: list to str
         # coverThumbnailUrls: list to str
         # headUrls: list to str
-        if not self.video_df.empty:
+        if not self.video_df.empty and not self.video_df[~self.video_df.photoId.isnull()].empty:
+            self.video_df = self.video_df[~self.video_df.photoId.isnull()]  # keep only rows that have a photoId
             self.video_df[['mainMvUrls', 'coverThumbnailUrls', 'headUrls', 'photoId']] = \
                 self.video_df[['mainMvUrls', 'coverThumbnailUrls', 'headUrls', 'photoId']].astype(str)
 
@@ -75,7 +78,7 @@ class GetMaterialFromKuaishouKaiyan(object):
             self.video_df['query_word'] = self.query_word
             self.video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
 
-            # 2-3 write to the database; the table's composite unique key is photoId + query_word + stat_date; on duplicate key, replace_into
+            # 2-3 write to the database; the table's composite unique key is signature + query_word + stat_date; on duplicate key, replace_into
             try:
                 self.video_df.to_sql(name="ctop_ai_material_info_from_kuaishou_kaiyan",
                                      con=self.db_engine,
@@ -86,6 +89,15 @@ class GetMaterialFromKuaishouKaiyan(object):
             except:
                 self.logger.error(traceback.format_exc())
 
+    def get_script_from_teng_xun_yun(self):
+        get_script_ins = GetScriptFromTengXunYunServer(logger=self.logger,
+                                                       db_engine=self.db_engine,
+                                                       task_df=self.video_df[['signature', 'video_url']],
+                                                       task_ids=None)
+        get_script_ins.submit_task()
+        if get_script_ins.task_ids:
+            get_script_ins.get_result()
+
 
 if __name__ == '__main__':
     db_info = {'user': 'hcst',

+ 41 - 35
get_material_and_script/get_material_from_ocean_engine.py

@@ -1,11 +1,14 @@
-import pandas as pd
-from config.url import get_material_info_from_ocean_engine_url, get_video_info_from_ocean_engine_url
-from urllib.parse import urlencode
-import requests
+import datetime
 import json
 import traceback
-import datetime
+from urllib.parse import urlencode
+
+import pandas as pd
+import requests
+
 from common_func import mysql_replace_into, NpEncoder, get_db_engine, get_logger
+from config.url import get_material_info_from_ocean_engine_url, get_video_info_from_ocean_engine_url
+from get_material_and_script.get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 
 
 class GetMaterialFromOceanEngine(object):
@@ -22,7 +25,7 @@ class GetMaterialFromOceanEngine(object):
         Call the Ocean Engine API with the keyword to get the material list and its basic info: best title, industry tags, video_id, etc.
         """
         # 1 request the Ocean Engine API for the basic material info
-        self.logger.info("query word: %s, calling the Ocean Engine material API, start -------------------- " % self.query_word)
+        self.logger.info(f"query word: {self.query_word}, calling the Ocean Engine material API, start...")
         material_df = pd.DataFrame()  # initialize the result
         has_more = True  # whether more paged data exists; initialized to True
         limit = 10  # 10 items per page
@@ -42,6 +45,7 @@ class GetMaterialFromOceanEngine(object):
                             'video_duration_type': 5}
 
             try:
+                self.logger.info(f"查询词:{self.query_word},调用头条巨量引擎物料接口, 分页获取第 {page} 页")
                 request_path = get_material_info_from_ocean_engine_url + '?' + urlencode(request_data)
                 request = requests.get(request_path)
                 result = json.loads(request.text)
@@ -49,15 +53,12 @@ class GetMaterialFromOceanEngine(object):
                 material_df = material_df.append(material_page_df)
                 if result.get('code') == 0 and result.get('data').get('has_more') is True:
                     page += 1
-                    self.logger.info("查询词:%s, 调用头条巨量引擎物料接口, 分页获取第 %s 页" % (self.query_word, page))
                 else:
                     has_more = False
             except:
-                self.logger.error("查询词:%s, 调用头条巨量引擎物料接口, 分页获取时发生异常信息: %s" %
-                                  (self.query_word, traceback.format_exc()))
+                self.logger.error(f"查询词:{self.query_word}, 调用头条巨量引擎物料接口,分页获取时发生异常信息: {traceback.format_exc()}")
 
-        self.logger.info("查询词:%s, 调用 调用头条巨量引擎物料接口, 结束执行,共 %s 个物料信息!" %
-                         (self.query_word, len(material_df)))
+        self.logger.info(f"查询词:{self.query_word}, 调用头条巨量引擎物料接口, 执行结束,共 {len(material_df)} 个物料信息!")
 
         # 2 save the fetched info to the database
         # 2-1 convert field types for easier insertion
@@ -94,23 +95,19 @@ class GetMaterialFromOceanEngine(object):
         cnt_per_request = 10
         # total number of videos
         total_cnt = len(self.signature_lst)
-        self.logger.info("查询词:%s, 调用头条巨量引擎视频接口,共 %s 个开始执行----------------------" % (self.query_word, total_cnt))
+        self.logger.info(f"查询词:{self.query_word}, 调用头条巨量引擎视频接口,共{total_cnt}个开始执行....")
         for i in range(0, total_cnt, cnt_per_request):
             if i + cnt_per_request < total_cnt:
                 query_ids = self.signature_lst[i: i + cnt_per_request]
-                self.logger.info("查询词:%s, 调用头条巨量引擎视频接口 分页获取第 %s 个 到 %s 个视频信息" %
-                                 (self.query_word, i, i + cnt_per_request))
+                self.logger.info(f"查询词:{self.query_word}, 调用头条巨量引擎视频接口,分页获取第{i+1}个到{i+cnt_per_request}个视频信息")
             else:
                 query_ids = self.signature_lst[i:]
-                self.logger.info("查询词:%s, 调用头条巨量引擎视频接口 分页获取第 %s 个 到 %s 个视频信息" %
-                                 (self.query_word, i, total_cnt))
-
+                self.logger.info(f"查询词:{self.query_word}, 调用头条巨量引擎视频接口,分页获取第{i+1}个到{total_cnt}个视频信息")
             try:
                 request_data = {"query_ids": query_ids, "water_mark": "creative_center"}
                 request = requests.post(url=get_video_info_from_ocean_engine_url,
                                         headers={'Content-Type': 'application/json'},
-                                        data=json.dumps(request_data, cls=NpEncoder)
-                                        )
+                                        data=json.dumps(request_data, cls=NpEncoder))
                 response_data = json.loads(request.text)
 
                 if response_data.get('code') == 0 and response_data.get('data'):
@@ -120,28 +117,38 @@ class GetMaterialFromOceanEngine(object):
                         single_df = pd.DataFrame([single_dict])
                         self.video_df = self.video_df.append(single_df)
             except:
-                self.logger.error("查询词:%s, 调用头条巨量引擎视频接口 分页获取 %s,出现异常信息:%s" %
-                                  (self.query_word, query_ids, traceback.format_exc()))
+                self.logger.error(f"查询词:{self.query_word}, 调用头条巨量引擎视频接口,分页获取{query_ids},出现异常信息:{traceback.format_exc()}")
 
-        self.logger.info("查询词:%s, 调用头条巨量引擎视频接口,共 %s 个执行完成----------------------" % (self.query_word, total_cnt))
+        self.logger.info(f"查询词:{self.query_word}, 调用头条巨量引擎视频接口,共{total_cnt}个执行完成!")
 
         # 2 save the fetched info to the database
         # 2-1 convert field types for easier insertion
         # play_info: list to str
-        self.video_df['play_info'] = self.video_df['play_info'].astype(str)
-        self.video_df.drop(labels='video_id', axis=1, inplace=True)
+        if not self.video_df.empty and not self.video_df[~self.video_df.signature.isnull()].empty:
+            self.video_df = self.video_df[~self.video_df.signature.isnull()]
+            self.video_df['play_info'] = self.video_df['play_info'].astype(str)
+            self.video_df.drop(labels='video_id', axis=1, inplace=True)
 
-        # 2-2 add the query word and date
-        self.video_df['query_word'] = self.query_word
-        self.video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
+            # 2-2 add the query word and date
+            self.video_df['query_word'] = self.query_word
+            self.video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
 
-        # 2-3 write to the database; the table's composite unique key is signature + query_word; on duplicate key, replace_into
-        self.video_df.to_sql(name="ctop_ai_video_info_from_ocean_engine",
-                        con=self.db_engine,
-                        if_exists='append',
-                        index=False,
-                        chunksize=1000,
-                        method=mysql_replace_into)
+            # 2-3 write to the database; the table's composite unique key is signature + query_word + stat_date; on duplicate key, replace_into
+            self.video_df.to_sql(name="ctop_ai_video_info_from_ocean_engine",
+                                 con=self.db_engine,
+                                 if_exists='append',
+                                 index=False,
+                                 chunksize=1000,
+                                 method=mysql_replace_into)
+
+    def get_script_from_teng_xun_yun(self):
+        get_script_ins = GetScriptFromTengXunYunServer(logger=self.logger,
+                                                       db_engine=self.db_engine,
+                                                       task_df=self.video_df[['signature', 'video_url']],
+                                                       task_ids=None)
+        get_script_ins.submit_task()
+        if get_script_ins.task_ids:
+            get_script_ins.get_result()
 
 
 if __name__ == '__main__':
@@ -160,4 +167,3 @@ if __name__ == '__main__':
     inst.get_material_basic_info()
     if inst.signature_lst:
         inst.get_video_basic_info()
-
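All of these writes pass method=mysql_replace_into to DataFrame.to_sql, so rows hitting the composite unique key overwrite the old rows instead of raising a duplicate-key error. The helper lives in common_func and is not part of this diff; a plausible sketch of such a pandas method callback (the signature follows the pandas documentation, the body is my assumption):

    def mysql_replace_into(table, conn, keys, data_iter):
        # pandas passes the target table, a live connection, the column names, and an iterator of row tuples
        columns = ', '.join(f'`{k}`' for k in keys)
        placeholders = ', '.join(['%s'] * len(keys))
        sql = f"REPLACE INTO `{table.name}` ({columns}) VALUES ({placeholders})"
        # executemany-style: REPLACE deletes the conflicting row and inserts the new one
        conn.execute(sql, list(data_iter))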

+ 20 - 23
get_material_and_script/get_script_from_tengxunyun.py

@@ -13,7 +13,7 @@ class GetScriptFromTengXunYunServer(object):
         self.logger = logger
         self.db_engine = db_engine
         self.task_ids = task_ids
-        self.task_df = task_df[~task_df.signature.isnull()]
+        self.task_df = task_df
 
     def submit_task(self):
         """
@@ -21,12 +21,12 @@ class GetScriptFromTengXunYunServer(object):
         task_df: a DataFrame whose columns include signature and video_url
         """
         # 1 get the tasks that have already been submitted
-        sql = """select signature from tb_asr_result """
+        sql = "select signature from tb_asr_result"
         submitted_task_df = pd.read_sql(sql, self.db_engine)
 
         # 2 tasks to submit: drop previously submitted ones, to avoid wasting paid service time on duplicates
         to_submit_task_df = self.task_df[~self.task_df.signature.isin(submitted_task_df.signature.values)]
-        self.logger.info("向腾讯云提交语音转脚本的任务个数为 %s" % len(to_submit_task_df))
+        self.logger.info(f"向腾讯云提交语音转脚本的任务个数为{len(to_submit_task_df)}")
 
         # 3 send the requests to submit the tasks
         for index, row in to_submit_task_df.iterrows():
@@ -37,16 +37,14 @@ class GetScriptFromTengXunYunServer(object):
             request = requests.post(request_full_path)
             try:
                 result = json.loads(request.text)
-                self.logger.info("素材:%s, 向腾讯云提交语音转脚本任务的返回信息为:%s " % (material_md5, result))
+                self.logger.info(f"素材:{material_md5}, 向腾讯云提交语音转脚本任务的返回信息为{result}")
             except:
-                self.logger.error("素材:%s, 向腾讯云提交语音转脚本任务的返回信息为:%s " % (material_md5, request.text))
+                self.logger.error(f"素材:{material_md5}, 向腾讯云提交语音转脚本任务的返回信息为为{ request.text}")
 
-        # 4 get the task_id of the submitted request for each material
-        if len(self.task_df.signature.values) == 1:
-            signature_for_sql = list(self.task_df.signature.values) * 2
-        else:
-            signature_for_sql = list(self.task_df.signature.values)
-        sql = """select task_id from tb_asr_result where signature in %s""" % (tuple(signature_for_sql),)
+        # 4 get the task_id for each material
+        signature_lst = list(self.task_df.signature.values)
+        sql = f"select task_id from tb_asr_result where signature " \
+              f"in {tuple(signature_lst*2) if len(signature_lst) == 1 else tuple(signature_lst)}"
         task_id_df = pd.read_sql(sql, self.db_engine)
         self.task_ids = list(task_id_df['task_id'].values)
 
@@ -58,22 +56,17 @@ class GetScriptFromTengXunYunServer(object):
         StatusStr String task status: waiting, doing, success, failed.
         ErrorMsg String reason for failure.
         """
-        self.logger.info("从腾讯云获取脚本的个数为%s" % len(self.task_ids))
+        self.logger.info(f"需要获取的脚本个数: {len(self.task_ids)}")
+        # 从腾讯云获取脚本的最大重试次数
         retry_upper_cnt = 10
         retry_cnt = 1
-
-        if len(self.task_ids) == 1:
-            self.task_ids = self.task_ids * 2
-
         while retry_cnt <= retry_upper_cnt:
-            sql = """select task_id, task_status from tb_asr_result where task_id in %s and task_status in (0,1)""" % \
-                  (tuple(self.task_ids),)
+            sql = f"select task_id, task_status from tb_asr_result where task_id " \
+                  f"in {tuple(self.task_ids * 2) if len(self.task_ids)==1 else tuple(self.task_ids)}" \
+                  f" and task_status in (0,1)"
             task_status_df = pd.read_sql(sql, self.db_engine)
             if task_status_df.empty:
                 break
-            else:
-                self.logger.info("从腾讯云获取脚本, 休眠10s,等待腾讯云任务计算完成。")
-                time.sleep(10 * 1)
 
             for task_id in task_status_df.task_id.values:
                 request_data = {'task_id': task_id}
@@ -81,8 +74,12 @@ class GetScriptFromTengXunYunServer(object):
                 request = requests.post(request_full_path)
                 try:
                     result = json.loads(request.text)
-                    self.logger.info("task_id:%s, 从腾讯云获取脚本返回信息为:%s " % (task_id, result))
+                    self.logger.info(f"task_id:{task_id}, 从腾讯云获取脚本返回信息为:{result}")
                 except:
-                    self.logger.error("task_id:%s, 从腾讯云获取脚本返回信息为:%s " % (task_id, request.text))
+                    self.logger.error(f"task_id:{task_id}, 从腾讯云获取脚本返回信息为:{request.text} ")
+
+            if retry_cnt >= 2:
+                self.logger.info("fetching scripts from Tencent Cloud: sleeping 3s while waiting for the tasks to finish.")
+                time.sleep(3)
 
             retry_cnt += 1
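The three crawlers now share one call sequence through their new get_script_from_teng_xun_yun methods. Note that the constructor no longer drops rows with a null signature (see the change at the top of this file), so callers are expected to filter beforehand, which is what the crawlers' new not-null guards above do. The shared sequence:

    get_script_ins = GetScriptFromTengXunYunServer(logger=logger,
                                                   db_engine=db_engine,
                                                   task_df=video_df[['signature', 'video_url']],
                                                   task_ids=None)
    get_script_ins.submit_task()        # submits only signatures not yet present in tb_asr_result
    if get_script_ins.task_ids:
        get_script_ins.get_result()     # polls tb_asr_result until no task is left in status 0/1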

+ 257 - 124
main.py

@@ -1,29 +1,43 @@
-import datetime
 import hashlib
+import traceback
 import uuid
 from concurrent.futures import ThreadPoolExecutor
+from datetime import date
+from datetime import timedelta
 from io import BytesIO
 from typing import Optional, List
 from urllib.parse import quote
-import pymysql
+
 import pandas as pd
 import uvicorn
 import yaml
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse
+from loguru import logger
 from pydantic import BaseModel, Field
 
 from asr_client import send_asr_request, send_task_request
-from common_func import get_db_engine, mysql_replace_into
+from common_func import get_db_engine
 from config.url import toutiao_static_video_url
 from database import insert, update, query, Task
 
+logger.add("logs/loguru.{time:YYYY-MM-DD}.log",
+           rotation="00:00",
+           format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
+           level="INFO")
+
 with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', encoding='utf-8') as f:
     config = yaml.load(f.read(), Loader=yaml.FullLoader)
     source_name_map = config['source_name_map']
 
-ai_word_engine = get_db_engine(config['ai_word_dev_db'])
+    # database engine: switch between dev/test and production depending on the host MAC
+    mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
+    if mac in ['5254003fa716', '52540003f5dd']:
+        ai_word_engine = get_db_engine(config['ai_word_dev_db'])
+    else:
+        ai_word_engine = get_db_engine(config['ai_word_product_db'])
+
 
 threadPool = ThreadPoolExecutor(max_workers=4)
 app = FastAPI()
@@ -54,12 +68,12 @@ class QueryItem():
     url: Optional[str] = None
 
 
-@app.get('/')
+@app.get('/', tags=['back-end task'])
 def index():
     return {'message': 'You have created the FastAPI service correctly!'}
 
 
-@app.post('/asr/task/submit')
+@app.post('/asr/task/submit', tags=['back-end task'])
 def task_submit(signature: str, url: str):
     json = send_asr_request(url)
     task = Task(signature=signature, task_id=json.Data.TaskId, task_result=json.to_json_string(), task_status=1)
@@ -67,7 +81,7 @@ def task_submit(signature: str, url: str):
     return {'code': 0, 'taskId': json.Data.TaskId}
 
 
-@app.post('/asr/task/result')
+@app.post('/asr/task/result', tags=['back-end task'])
 def task_submit(task_id: int):
     json = send_task_request(task_id)
     task = query(None, None, task_id)[0]
@@ -85,150 +99,269 @@ def task_submit(task_id: int):
     return {'code': 0, 'status': json.Data.StatusStr}
 
 
-@app.post('/asr/task/list')
+@app.post('/asr/task/list', tags=['back-end task'])
 def task_submit(task_status: int):
     task = query(None, task_status, None)
     return {'code': 0, 'data': task}
 
 
-class QueryWordItem(BaseModel):
-    query_word: str = Field(..., description="查询词", min_length=1)
-    stat_date: str = Field(..., description="日期", min_length=10, max_length=10)
-    source: int = Field(..., description="来源,")
+class BaseResponse(BaseModel):
+    message: str = Field(..., description='消息')
+    success: bool = Field(..., description='true or false')
+    code: int = Field(..., description='')
 
 
-@app.post('/export_excel/')
-def export_excel(item: List[QueryWordItem]):
-    video_df = pd.DataFrame()
-    if len(item) == 1:
-        # a single item: export directly
-        pass
-    else:
-        # 1 fetch the video data from the database
-        # multiple items: if the same material matches several query words, merge and tag it with all of them
-        for obj in item:
-            query_word = obj.query_word
-            stat_date = obj.stat_date
-            source = obj.source
-            sql = f"select signature, video_url, query_word, stat_date, {source} source from {source_name_map[source]['table']} " \
-                  f"where query_word = '{query_word}' " \
-                  f"and stat_date = '{stat_date}'"
-            df = pd.read_sql(sql, ai_word_engine)
-            video_df = video_df.append(df)
+class TaskDetail(BaseModel):
+    source_name: str = Field('内部创意', description='data source name')
+    query_word: str = Field('红包', description='keyword')
+    stat_date: str = Field('2021-11-11', description='date')
+    script_num: int = Field(180, description='number of scripts')
+    task_status: str = Field('执行成功', description='status')
+    number: int = Field(0, description='row number')
 
-        # de-duplicate by 'signature' + 'query_word' + 'stat_date'
-        video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source'], keep='last', inplace=True)
-        g = video_df.groupby('signature')
 
-        query_word_lst_df = g.apply(lambda x: x['query_word'].unique())
-        query_word_lst_df.name = 'query_word_lst'
+class ConfigDetail(BaseModel):
+    config_id: str = Field(..., description="脚本配置id")
+    query_word_lst: List[str] = Field(..., description="关键词")
+    create_time: str = Field(..., description="创建时间")
+    operator: str = Field(..., description="创建人")
+    number: int = Field(..., description="序号")
 
-        url_df = g.apply(lambda x: x['video_url'].values[0])
-        url_df.name = 'video_url'
 
-        source_df = g.apply(lambda x: x['source'].values[0])
-        source_df.name = 'source'
+class TaskResponse(BaseResponse):
+    total_num: int = Field(0, description="总个数")
+    page_num: int = Field(1, description="第几页")
+    page_size: int = Field(10, description="每页个数")
+    config_id: str = Field('', description="脚本配置id")
+    result: List[TaskDetail] = Field(..., description="结果详情")
 
-        video_query_word_df = pd.concat([query_word_lst_df, url_df, source_df], axis=1)
-        video_query_word_df.reset_index(inplace=True, drop=False)
 
-        video_query_word_df['video_url'] = video_query_word_df.apply(
-            lambda row: toutiao_static_video_url + row['signature'] if row.get('source') == 2 else row['video_url'], axis=1)
+class ConfigResponse(BaseResponse):
+    total_num: int = Field(0, description="总个数")
+    page_num: int = Field(1, description="第几页")
+    page_size: int = Field(10, description="每页个数")
+    result: List[ConfigDetail] = Field(..., description="结果详情")
 
-        # 2 fetch the scripts for the videos from step 1
-        if not video_query_word_df.empty:
-            sql = f"select signature, word_text from tb_asr_result where signature in " \
-                  f"{tuple(video_query_word_df.signature.values) if len(video_query_word_df.signature.values) > 1 else tuple(list(video_query_word_df.signature.values) * 2)} " \
-                  f"and task_status = 2"
-            script_df = pd.read_sql(sql, ai_word_engine)
-            out_df = video_query_word_df.merge(script_df, on='signature', how='inner')
-        else:
-            pass
 
-        # 3 return the data as a stream
-        if not out_df.empty:
-            bio = BytesIO()
-            writer = pd.ExcelWriter(bio, engine='xlsxwriter')
-            out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False, encoding='utf8mb4')
-            writer.save()
-            bio.seek(0)
+class QueryWordItem(BaseModel):
+    query_word: str = Field("红包", description="查询词", min_length=1)
+    stat_date: str = Field("2021-11-16", description="日期", min_length=10, max_length=10)
+    source_code: int = Field(2, description="数据来源编码{1:'内部创意', 2:'巨量创意', 3:'开眼快创'}")
+
 
-            # assemble the response headers
-            now_date = datetime.date.today().strftime('%Y-%m-%d')
-            headers = {"content-type": "application/vnd.ms-excel",
-                       "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx"
-                       }
+class ScriptConfigLst(BaseModel):
+    start_date: Optional[date] = Field(date.today() + timedelta(days=-6), description="start date, for filtering")
+    end_date: Optional[date] = Field(date.today(), description="end date, for filtering")
+    search_word: Optional[str] = Field('', description="keyword, for filtering")
+    page_num: int = Field(1, description="page number")
+    page_size: int = Field(10, description="page size")
 
-            return StreamingResponse(bio, media_type='xlsx', headers=headers)
 
-    return None
+class QueryWordTaskInfoLst(BaseModel):
+    start_date: Optional[date] = Field(date.today() + timedelta(days=-30), description="start date, for filtering")
+    end_date: Optional[date] = Field(date.today(), description="end date, for filtering")
+    search_word: Optional[str] = Field('', description="keyword, for filtering")
+    page_num: int = Field(1, description="page number")
+    page_size: int = Field(10, description="page size")
+    config_id: Optional[str] = Field('', description="script config id")
+    source_code: Optional[List[int]] = Field([0], description="data source codes {1: '内部创意', 2: '巨量创意', 3: '开眼快创'}")
 
 
-class ScriptConfig(BaseModel):
-    query_word_lst: List = Field(..., description="关键词组")
+class AddScriptConfig(BaseModel):
+    query_word_lst: List[str] = Field(..., description="关键词组")
     operator: str = Field(..., description="操作者")
 
+    class Config:
+        schema_extra = {
+            "example": {
+                "query_word_lst": ["红包", "淘特"],
+                "operator": "龙猫"
+            }
+        }
+
+
+@app.post('/export_script_file/', tags=['front-end interactive'],
+          description="export the script file",
+          summary='export the script file'
+          )
+@logger.catch
+def export_script_file(item: List[QueryWordItem]):
+    video_df = pd.DataFrame()
+    # 1 fetch the video data from the database
+    # if the same material matches several query words, merge and tag it with all of them
+    for obj in item:
+        query_word = obj.query_word
+        stat_date = obj.stat_date
+        source_code = obj.source_code
+        sql = f"select signature, video_url, query_word, stat_date, {source_code} source_code from {source_name_map[source_code]['table']} " \
+              f"where query_word = '{query_word}' " \
+              f"and stat_date = '{stat_date}'"
+        df = pd.read_sql(sql, ai_word_engine)
+        video_df = video_df.append(df)
+
+    # de-duplicate by 'signature' + 'query_word' + 'stat_date'
+    video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source_code'], keep='last', inplace=True)
+
+    video_query_word_df = video_df.groupby('signature').apply(lambda x: pd.Series({'query_word_lst': x['query_word'].unique(),
+                                                                                   'video_url': x['video_url'].values[0],
+                                                                                   'source_code': x['source_code'].values[0]}))
+    video_query_word_df.reset_index(inplace=True, drop=False)
+
+    # if source_code == 2 (Ocean Engine), replace the video link with the permanent static one
+    video_query_word_df['video_url'] = video_query_word_df.apply(
+        lambda row: toutiao_static_video_url + row['signature'] if row.get('source_code') == 2 else row['video_url'], axis=1)
+
+    # 2 fetch the scripts for the videos from step 1
+    out_df = pd.DataFrame()  # initialize so the empty case below cannot raise a NameError
+    if not video_query_word_df.empty:
+        signature_lst = list(video_query_word_df.signature.values) if len(video_query_word_df.signature.values) > 1 \
+            else list(video_query_word_df.signature.values) * 2
+        sql = f"select signature, word_text from tb_asr_result where signature in {tuple(signature_lst)} " \
+              f"and word_text is not null"
+        script_df = pd.read_sql(sql, ai_word_engine)
+        out_df = video_query_word_df.merge(script_df, on='signature', how='inner')
+
+    # 3 return the data as a stream
+    if not out_df.empty:
+        bio = BytesIO()
+        writer = pd.ExcelWriter(bio, engine='xlsxwriter')
+        out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False, encoding='utf8mb4')
+        writer.save()
+        bio.seek(0)
+
+        # assemble the response headers
+        now_date = date.today().strftime('%Y-%m-%d')
+        headers = {"content-type": "application/vnd.ms-excel",
+                   "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx"
+                   }
+
+        return StreamingResponse(bio, media_type='xlsx', headers=headers)
 
-@app.post('/get_script_config_lst/')
-def get_script_config_lst():
-    pass
-
-
-@app.post('/add_script_config/')
-def add_script_config(item: ScriptConfig):
-    config_id = str(uuid.uuid4())
-    config_lst = []
-    for query_word in item.query_word_lst:
-        sql = f"select * from ctop_ai_query_word where query_word = '{query_word}'"
-        query_word_df = pd.read_sql(sql, ai_word_engine)
-        if not query_word_df.empty:
-            # update ctop_ai_query_word
-            query_word_id = query_word_df.query_word_id.values[0]
-            script_config_conn_num = query_word_df.script_config_conn_num.values[0] + 1
-            db_con = pymysql.connect(**config['ai_word_dev_db'])
-            db_cur = db_con.cursor()
-            sql = f"update ctop_ai_query_word set script_config_conn_num = {script_config_conn_num} where query_word_id = '{query_word_id}'"
-            db_cur.execute(sql)
-            db_con.commit()
-            db_con.close()
-            # update_query_word_df = pd.DataFrame([{"query_word_id": query_word_id,
-            #                                       "query_word": query_word,
-            #                                       "script_conn_num": script_conn_num}])
-            # update_query_word_df.to_sql(name="ctop_ai_query_word",
-            #                             con=ai_word_engine,
-            #                             if_exists="append",
-            #                             method=mysql_replace_into,
-            #                             index=False)
+    return None
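A request sketch for the new endpoint, reusing the Field examples above (host and port taken from the uvicorn.run line at the bottom of this file; adjust for your environment):

    import requests

    items = [{"query_word": "红包", "stat_date": "2021-11-16", "source_code": 2},
             {"query_word": "红包", "stat_date": "2021-11-16", "source_code": 3}]
    resp = requests.post("http://127.0.0.1:31013/export_script_file/", json=items)
    with open("script_export.xlsx", "wb") as f:
        f.write(resp.content)  # the StreamingResponse body is the Excel file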
 
-        else:
-            query_word_id = str(uuid.uuid4())
-            new_query_word_df = pd.DataFrame([{"query_word_id": query_word_id, "query_word": query_word, "script_config_conn_num": 1}])
-            new_query_word_df.to_sql(name="ctop_ai_query_word",
-                                     con=ai_word_engine,
-                                     if_exists="append",
-                                     index=False)
-
-        config_lst.append({"config_id": config_id, "query_word_id": query_word_id})
-
-    # insert the new config records into ctop_ai_script_query_word_config
-    config_df = pd.DataFrame(config_lst)
-    config_df['operator'] = item.operator
-    config_df['operate_type'] = 1
-    config_df.to_sql(name="ctop_ai_script_query_word_config",
-                     con=ai_word_engine,
-                     if_exists='append',
-                     index=False)
-    return {"code": 0, "message": "success"}
 
+@app.post('/get_script_config_lst/', tags=['front-end interactive'], response_model=ConfigResponse,
+          description="script config list",
+          summary='script config list'
+          )
+@logger.catch
+def get_script_config_lst(item: ScriptConfigLst):
+    try:
+        end_date = item.end_date + timedelta(days=1)
+        sql = f"select * from ctop_ai_script_query_word_config " \
+              f"where start_time >= '{item.start_date}' " \
+              f"and start_time < '{end_date}' " \
+              f"and ('{item.search_word}' = '' or query_word like '%%{item.search_word}%%')"
+        org_df = pd.read_sql(sql, ai_word_engine)
+
+        g_df = org_df.groupby('config_id').apply(lambda x: pd.Series({'query_word_lst': list(x['query_word'].unique()),
+                                                                      'operator': x['operator'].min(),
+                                                                      'create_time': str(x['start_time'].min())}))
+        g_df.reset_index(drop=False, inplace=True)
+        g_df.sort_values(by='create_time', ascending=False, inplace=True)
+        g_df['number'] = list(range(1, len(g_df) + 1))
+        total_num = g_df.shape[0]
+        detail = g_df.iloc[(item.page_num - 1) * item.page_size: item.page_num * item.page_size].to_dict('records')
+        response = {'code': 0,
+                    "message": "查询成功",
+                    "success": True,
+                    "result": detail,
+                    "total_num": total_num,
+                    "page_num": item.page_num,
+                    "page_size": item.page_size}
+        logger.info(f"request body: {item}, response body: {response}")
+        return response
+    except:
+        response = {"code": -1,
+                    "message": traceback.format_exc(),
+                    "success": False,
+                    "result": None}
+        logger.error(f"request body: {item}, response body: {response}")
+        return response
+
+
+@app.post('/get_query_word_task_info_lst/', tags=['front-end interactive'], response_model=TaskResponse,
+          description="script data export list",
+          summary='script data export list'
+          )
+@logger.catch
+def get_query_word_task_info_lst(item: QueryWordTaskInfoLst):
+    try:
+        end_date = item.end_date + timedelta(days=1)
+        source_code_lst = item.source_code * 2 if len(item.source_code) == 1 else item.source_code
+        df = pd.DataFrame()  # initialize so an empty query_word_lst cannot raise a NameError below
+        if item.config_id != '':
+            # config_id is a uuid string and must be quoted inside the SQL literal
+            sql = f"select distinct(query_word) query_word from ctop_ai_script_query_word_config where config_id = '{item.config_id}'"
+            query_word_lst = list(pd.read_sql(sql, ai_word_engine).query_word.values)
+            if len(query_word_lst) > 0:
+                query_word_lst = query_word_lst * 2 if len(query_word_lst) == 1 else query_word_lst
+                sql = f"select * from ctop_ai_query_word_task_record where query_word in {tuple(query_word_lst)} " \
+                      f"and stat_date >= '{item.start_date}' and stat_date < '{end_date}' " \
+                      f"and ('{item.source_code}' = '[0]' or source_code in {tuple(source_code_lst)}) " \
+                      f"and ('{item.search_word}' = '' or query_word = '{item.search_word}')"
+                df = pd.read_sql(sql, ai_word_engine)
+        else:
+            sql = f"select * from ctop_ai_query_word_task_record where " \
+                  f"stat_date >= '{item.start_date}' and stat_date < '{end_date}' " \
+                  f"and ('{item.source_code}' = '[0]' or source_code in {tuple(source_code_lst)}) " \
+                  f"and ('{item.search_word}' = '' or query_word = '{item.search_word}')"
+            df = pd.read_sql(sql, ai_word_engine)
 
-if __name__ == '__main__':
-    # 1 read the config file
+        df['source_name'] = df['source_code'].apply(lambda x: source_name_map[x]['name'])
+        df = df[['source_name', 'query_word', 'stat_date', 'script_num', 'task_status']]
+        df.sort_values(['stat_date', 'source_name', 'query_word'], ascending=False, inplace=True)
+        df['number'] = list(range(1, len(df) + 1))
+        total_num = df.shape[0]
+        detail = df.iloc[(item.page_num - 1) * item.page_size: item.page_num * item.page_size].to_dict('records')
+
+        response = {'code': 0,
+                    "message": "查询成功",
+                    "success": True,
+                    "result": detail,
+                    "total_num": total_num,
+                    "page_num": item.page_num,
+                    "page_size": item.page_size,
+                    "config_id": item.config_id}
+        logger.info(f"request body: {item}, response body: {response}")
+        return response
+    except:
+        response = {"code": -1,
+                    "message": traceback.format_exc(),
+                    "success": False,
+                    "result": None}
+        logger.error(f"request body: {item}, response body: {response}")
+        return response
+
+
+@app.post('/add_script_config/',
+          tags=['front-end interactive'],
+          description="add a script config",
+          summary='add a script config',
+          response_model=BaseResponse)
+@logger.catch
+def add_script_config(item: AddScriptConfig):
+    try:
+        # split the config into one record per query word
+        config_id = str(uuid.uuid4())
+        config_df = pd.DataFrame(data=item.query_word_lst, columns=['query_word'])
+        config_df['config_id'] = config_id
+        config_df['operator'] = item.operator
+        config_df['operate_type'] = 1
+
+        # insert the new config records into ctop_ai_script_query_word_config
+        config_df.to_sql(name="ctop_ai_script_query_word_config",
+                         con=ai_word_engine,
+                         if_exists='append',
+                         index=False)
+        logger.info(f"request body: {item}, code:0, message: add_script_config success")
+        return {"code": 0,
+                "message": "add success",
+                "success": True}
+    except:
+        logger.error(f"request body: {item}, code:-1, message: add_script_config fail {traceback.format_exc()}")
+        return {"code": -1,
+                "message": traceback.format_exc(),
+                "success": False}
 
-    # test_items = [{'query_word': '红包', 'stat_date': '2021-10-28', 'source': 2},
-    #               {'query_word': '红包', 'stat_date': '2021-10-28', 'source': 3},
-    #               {'query_word': '赚钱', 'stat_date': '2021-10-28', 'source': 2},
-    #               {'query_word': '赚钱', 'stat_date': '2021-10-28', 'source': 3}]
-    # export_excel(test_items)
 
+if __name__ == '__main__':
     uvicorn.run(app='main:app', host="0.0.0.0", port=31013, reload=True, debug=True)
 # gunicorn main:app -w 4 -k uvicorn.workers.UvicornWorker  # production launch command

+ 88 - 77
time_task/get_material_and_script_by_query_word.py

@@ -1,108 +1,119 @@
 import datetime
 import os
 import sys
-
+import traceback
+import uuid
 import pandas as pd
 import yaml
+from loguru import logger
 
 curr_path = os.path.abspath(os.path.dirname(__file__))
 project_root_path = curr_path[:curr_path.find("video_to_word") + len("video_to_word")]
 sys.path.append(project_root_path)
 
-from common_func import get_db_engine, get_logger
+from common_func import get_db_engine, mysql_replace_into
 from get_material_and_script.get_material_from_kuaishou_kaiyan import GetMaterialFromKuaishouKaiyan
 from get_material_and_script.get_material_from_ocean_engine import GetMaterialFromOceanEngine
-from get_material_and_script.get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 from get_material_and_script.get_material_from_huichuang import GetMaterialFromHuiChuang
 
 if __name__ == '__main__':
     # create the logger
-    logger = get_logger(log_file_name="/data/pythonProject/video_to_word/logs/get_high_quality_material.log",
-                        log_name='get_high_quality_material_logger')
-    logger.info("get_high_quality_material_logger started! id of logger is: %s" % id(logger))
+    logger.add("logs/get_high_quality_material.{time:YYYY-MM-DD}.log",
+               rotation="00:00",
+               format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
+               level="INFO")
 
     # 1 read the config file
     with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', encoding='utf-8') as f:
         config = yaml.load(f.read(), Loader=yaml.FullLoader)
 
     # 1-1 database engine: switch between the dev and production environments
-    # the read engine uses the production database; the write engine switches with the environment (test/production)
-    # note: this project currently reads and writes the test database
-    # TODO switch by environment once the database migration and service rollout are done
-    ai_word_engine = get_db_engine(config['ai_word_dev_db'])
+    mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
+    if mac in ['5254003fa716', '52540003f5dd']:
+        ai_word_engine = get_db_engine(config['ai_word_dev_db'])
+    else:
+        ai_word_engine = get_db_engine(config['ai_word_product_db'])
+
     jeecg_boot_engine = get_db_engine(config['jeecg_boot_product_db'])
 
     # 1-2 number of rows per batched database write
     chunk_size = config['chunk_size']
 
     # 1-3 source code & name
-    source_name = config['source_name_map']
+    source_name_map = config['source_name_map']
 
     # 2 read the query table to get the keywords and sources
-    sql = """
-    select  query_word,
-            query_time_range,
-            source_code
-     from ctop_ai_query_word where status = 0
-    """
+    sql = f"select query_word from ctop_ai_script_query_word_config where end_time = '9999-12-31'"
     df = pd.read_sql(sql, ai_word_engine)
-    for index, row in df.iterrows():
-        query_word = row['query_word']
-        query_time_range = row['query_time_range']
-        source_code = eval(row['source_code'])
-
-        # 0 fields recording how the task ran
-        size = 0
-        message = ""
-        task_status = 0
-        inst = None
-
-        # 1 fetch the high-quality material video_url
-        video_df = pd.DataFrame()
-        if source_code == 1:
-            # fetch the company's in-house high-quality materials
-            inst = GetMaterialFromHuiChuang(query_word=query_word,
-                                            logger=logger,
-                                            jeecg_boot_db_engine=jeecg_boot_engine,
-                                            ai_word_db_engine=ai_word_engine)
-            inst.get_video_basic_info()
-
-        if source_code == 3:
-            # fetch Kuaishou Kaiyan high-quality materials
-            inst = GetMaterialFromKuaishouKaiyan(query_word=query_word,
-                                                 logger=logger,
-                                                 db_engine=ai_word_engine)
-            inst.get_video_basic_info()
-
-        if source_code == 2:
-            # fetch Ocean Engine high-quality materials; requires the query_time_range parameter
-            inst = GetMaterialFromOceanEngine(query_word=query_word,
-                                              period_type=query_time_range,
-                                              logger=logger,
-                                              db_engine=ai_word_engine)
-            inst.get_material_basic_info()
-            if inst.signature_lst:
-                inst.get_video_basic_info()
-
-        # 2 call the Tencent Cloud speech-to-script service to get the scripts
-        if (inst is not None) and (not inst.video_df.empty):
-            inst.video_df = inst.video_df[~inst.video_df.signature.isnull()]
-            get_script_ins = GetScriptFromTengXunYunServer(logger=logger,
-                                                           db_engine=ai_word_engine,
-                                                           task_df=inst.video_df[['signature', 'video_url']],
-                                                           task_ids=None)
-            get_script_ins.submit_task()
-            if get_script_ins.task_ids:
-                get_script_ins.get_result()
-                size = len(get_script_ins.task_ids)
-            else:
-                size = 0
-
-        # 3 write the task execution info to the database
-        task_info = {'query_word': query_word,
-                     'stat_date': datetime.date.today().strftime('%Y-%m-%d'),
-                     'source_code': source_code,
-                     'size': size}
-        logger.info(f"{task_info}")
-
-
+    query_word_lst = list(df['query_word'].unique())
+    query_word_time = 3
+    for query_word in query_word_lst:
+        for source_code, value in source_name_map.items():
+            if value['status'] == 1:
+                # the task starts executing: record it as in progress
+                task_info = {"source_code": source_code,
+                             "task_status": "执行中",
+                             "query_word": query_word,
+                             "script_num": None,
+                             "stat_date": datetime.datetime.today().strftime('%Y-%m-%d')}
+                task_info_df = pd.DataFrame([task_info])
+                task_info_df.to_sql(name='ctop_ai_query_word_task_record',
+                                    con=ai_word_engine,
+                                    if_exists='append',
+                                    index=False,
+                                    chunksize=1000,
+                                    method=mysql_replace_into)
+                try:
+                    inst = None
+                    # fetch the video info
+                    if source_code == 1:
+                        # fetch the company's in-house high-quality materials
+                        inst = GetMaterialFromHuiChuang(query_word=query_word,
+                                                        logger=logger,
+                                                        jeecg_boot_db_engine=jeecg_boot_engine,
+                                                        ai_word_db_engine=ai_word_engine)
+                        inst.get_video_basic_info()
+                    if source_code == 3:
+                        # fetch Kuaishou Kaiyan high-quality materials
+                        inst = GetMaterialFromKuaishouKaiyan(query_word=query_word,
+                                                             logger=logger,
+                                                             db_engine=ai_word_engine)
+                        inst.get_video_basic_info()
+                    if source_code == 2:
+                        # fetch Ocean Engine high-quality materials; requires the query_time_range parameter
+                        inst = GetMaterialFromOceanEngine(query_word=query_word,
+                                                          period_type=query_word_time,
+                                                          logger=logger,
+                                                          db_engine=ai_word_engine)
+                        inst.get_material_basic_info()
+                        if inst.signature_lst:
+                            inst.get_video_basic_info()
+
+                    # fetch the script info
+                    if not inst.video_df.empty:
+                        inst.get_script_from_teng_xun_yun()
+                        sql = f"select signature from tb_asr_result where signature in" \
+                              f" {tuple(inst.video_df.signature.values) if len(inst.video_df) > 1 else tuple(list(inst.video_df.signature.values) * 2)} " \
+                              f"and word_text is not null"
+                        df = pd.read_sql(sql, ai_word_engine)
+                        script_num = len(df)
+                    else:
+                        script_num = 0
+                    # update the task record: execution status and number of successful rows
+                    with ai_word_engine.begin() as conn:
+                        sql = f"update ctop_ai_query_word_task_record set script_num = {script_num}, task_status = '执行完成', message = '执行成功' " \
+                              f"where query_word = '{query_word}' " \
+                              f"and stat_date = '{datetime.datetime.today().strftime('%Y-%m-%d')}' " \
+                              f"and source_code = {source_code}"
+                        conn.execute(sql)
+                    logger.info(f"query word: {query_word}, source: {source_code}, date: {datetime.datetime.today().strftime('%Y-%m-%d')}, "
+                                f"finished, fetched {script_num} scripts")
+                except:
+                    with ai_word_engine.begin() as conn:
+                        # bind the traceback as a parameter: it contains quotes and newlines that would break an inlined SQL literal
+                        sql = "update ctop_ai_query_word_task_record set task_status = '执行异常', message = %s " \
+                              "where query_word = %s " \
+                              "and stat_date = %s " \
+                              "and source_code = %s"
+                        conn.execute(sql, (traceback.format_exc(), query_word,
+                                           datetime.datetime.today().strftime('%Y-%m-%d'), source_code))
+                    logger.error(f"query word: {query_word}, source: {source_code}, date: {datetime.datetime.today().strftime('%Y-%m-%d')}, "
+                                 f"exception: {traceback.format_exc()}")

+ 2 - 2
time_task/tmp_task.py

@@ -38,8 +38,8 @@ if __name__ == '__main__':
     source_name = config['source_name_map']
 
     # 2 get the material signature and url
-    project_id = 458
-    query_word = '淘特'
+    project_id = 1860892
+    query_word = '爱奇艺极速版(新)'
     # `channel_type` int(2) DEFAULT '0' COMMENT '0: self-produced, 1: Suzao (素造)',
     sql = f"select signature,url video_url, channel_type from ctop_kuaishou_video_get where account_id in " \
           f"(select account_id from ctop_user_allocation where project_id = {project_id}) " \