liyuyi@c-top.com.cn 3 years ago
Parent
Commit fcf0eeef89
1 changed file with 58 additions and 53 deletions

main.py  +58 -53

@@ -22,6 +22,7 @@ from common_func import get_db_engine
 from config.url import toutiao_static_video_url
 from database import insert, update, query, Task
 
+logger.remove()  # remove the handler that loguru creates automatically on import; without this, every message is emitted twice
 logger.add("logs/loguru.{time:YYYY-MM-DD}.log",
            rotation="00:00",
            format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
@@ -31,14 +32,13 @@ with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', enco
     config = yaml.load(f.read(), Loader=yaml.FullLoader)
     source_name_map = config['source_name_map']
 
-# database connection engine, switched between the dev/test and production environments
+    # database connection engine, switched between the dev/test and production environments
     mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
     if mac in ['5254003fa716', '52540003f5dd']:
         ai_word_engine = get_db_engine(config['ai_word_dev_db'])
     else:
         ai_word_engine = get_db_engine(config['ai_word_product_db'])
 
-
 threadPool = ThreadPoolExecutor(max_workers=4)
 app = FastAPI()
 origins = [
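
The engine selection in this hunk keys off the machine's MAC address rather than an environment variable. A hedged sketch of just that selection step, with the whitelisted MACs taken from the diff and the config keys assumed to match the YAML file above:

    import uuid

    # uuid.getnode() returns the hardware address as a 48-bit integer; the last
    # 12 hex digits of a UUID built from it are the MAC, e.g. '5254003fa716'
    mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
    dev_machines = {'5254003fa716', '52540003f5dd'}  # dev/test hosts listed in the diff
    db_key = 'ai_word_dev_db' if mac in dev_machines else 'ai_word_product_db'
    print(mac, '->', db_key)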
@@ -186,57 +186,62 @@ class AddScriptConfig(BaseModel):
           summary='导出文件'
           )
 def export_script_file(item: List[QueryWordItem]):
-    video_df = pd.DataFrame()
-    # 1 fetch the video data from the database
-    # if the same material has several query words, merge them and tag the material with all of them
-    for obj in item:
-        query_word = obj.query_word
-        stat_date = obj.stat_date
-        source_code = obj.source_code
-        sql = f"select signature, video_url, query_word, stat_date, {source_code} source_code from {source_name_map[source_code]['table']} " \
-              f"where query_word = '{query_word}' " \
-              f"and stat_date = '{stat_date}'"
-        df = pd.read_sql(sql, ai_word_engine)
-        video_df = video_df.append(df)
-
-    # de-duplicate by 'signature' + 'query_word' + 'stat_date'
-    video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source_code'], keep='last', inplace=True)
-
-    video_query_word_df = video_df.groupby('signature').apply(lambda x: pd.Series({'query_word_lst': x['query_word'].unique(),
-                                                                                   'video_url': x['video_url'].values[0],
-                                                                                   'source_code': x['source_code'].values[0]}))
-    video_query_word_df.reset_index(inplace=True, drop=False)
-
-    # if source == 2 (Toutiao Ocean Engine), replace the video link with its permanent link
-    video_query_word_df['video_url'] = video_query_word_df.apply(
-        lambda row: toutiao_static_video_url + row['signature'] if row.get('source_code') == 2 else row['video_url'], axis=1)
-
-    # 2 fetch the scripts for the video data from step 1
-    if not video_query_word_df.empty:
-        signature_lst = list(video_query_word_df.signature.values) if len(video_query_word_df.signature.values) > 1 \
-            else list(video_query_word_df.signature.values) * 2
-        sql = f"select signature, word_text from tb_asr_result where signature in {tuple(signature_lst)}" \
-              f"and word_text is not null"
-        script_df = pd.read_sql(sql, ai_word_engine)
-        out_df = video_query_word_df.merge(script_df, on='signature', how='inner')
-
-    # 3 return the data as a stream
-    if not out_df.empty:
-        bio = BytesIO()
-        writer = pd.ExcelWriter(bio, engine='xlsxwriter')
-        out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False, encoding='utf8mb4')
-        writer.save()
-        bio.seek(0)
-
-        # assemble the response headers
-        now_date = date.today().strftime('%Y-%m-%d')
-        headers = {"content-type": "application/vnd.ms-excel",
-                   "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx"
-                   }
-
-        return StreamingResponse(bio, media_type='xlsx', headers=headers)
-
-    return None
+    try:
+        video_df = pd.DataFrame()
+        # 1 fetch the video data from the database
+        # if the same material has several query words, merge them and tag the material with all of them
+        for obj in item:
+            query_word = obj.query_word
+            stat_date = obj.stat_date
+            source_code = obj.source_code
+            sql = f"select signature, video_url, query_word, stat_date, {source_code} source_code from {source_name_map[source_code]['table']} " \
+                  f"where query_word = '{query_word}' " \
+                  f"and stat_date = '{stat_date}'"
+            df = pd.read_sql(sql, ai_word_engine)
+            video_df = video_df.append(df)
+
+        # de-duplicate by 'signature' + 'query_word' + 'stat_date'
+        video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source_code'], keep='last', inplace=True)
+
+        video_query_word_df = video_df.groupby('signature').apply(lambda x: pd.Series({'query_word_lst': x['query_word'].unique(),
+                                                                                       'video_url': x['video_url'].values[0],
+                                                                                       'source_code': x['source_code'].values[0]}))
+        video_query_word_df.reset_index(inplace=True, drop=False)
+
+        # if source == 2 (Toutiao Ocean Engine), replace the video link with its permanent link
+        video_query_word_df['video_url'] = video_query_word_df.apply(
+            lambda row: toutiao_static_video_url + row['signature'] if row.get('source_code') == 2 else row['video_url'], axis=1)
+
+        # 2 fetch the scripts for the video data from step 1
+        if not video_query_word_df.empty:
+            signature_lst = list(video_query_word_df.signature.values) if len(video_query_word_df.signature.values) > 1 \
+                else list(video_query_word_df.signature.values) * 2
+            sql = f"select signature, word_text from tb_asr_result where signature in {tuple(signature_lst)}" \
+                  f"and word_text is not null"
+            script_df = pd.read_sql(sql, ai_word_engine)
+            out_df = video_query_word_df.merge(script_df, on='signature', how='inner')
+
+        # 3 return the data as a stream
+        if not out_df.empty:
+            bio = BytesIO()
+            writer = pd.ExcelWriter(bio, engine='xlsxwriter')
+            out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False, encoding='utf8mb4')
+            writer.save()
+            bio.seek(0)
+
+            # assemble the response headers
+            now_date = date.today().strftime('%Y-%m-%d')
+            headers = {"content-type": "application/vnd.ms-excel",
+                       "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx"
+                       }
+            logger.info(f"request body: {item}, message: 数据导出成功")
+            return StreamingResponse(bio, media_type='xlsx', headers=headers)
+        else:
+            logger.info(f"request body: {item}, message: 没有获取到对应的数据")
+            return None
+    except:
+        logger.error(f"request body: {item}, message: {traceback.format_exc()}")
+        return None
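
A detail in step 2 above that is easy to misread: when only one signature matches, the list is duplicated so that str(tuple(...)) prints as ('x', 'x') instead of ('x',), whose trailing comma is not valid SQL. A hedged alternative sketch that avoids the string interpolation altogether with an expanding bind parameter; the helper name and SQLAlchemy 1.4+ usage are assumptions, not part of this commit:

    import pandas as pd
    from sqlalchemy import bindparam, text

    def fetch_scripts(engine, signatures):
        # parameterized IN (...) clause; values are sent separately from the SQL text
        stmt = text(
            "select signature, word_text from tb_asr_result "
            "where signature in :sigs and word_text is not null"
        ).bindparams(bindparam("sigs", expanding=True))
        with engine.connect() as conn:
            return pd.read_sql(stmt, conn, params={"sigs": list(signatures)})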
 
 
 @logger.catch
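
For reference, the export branch of the endpoint (write the DataFrame to an in-memory workbook, rewind the buffer, stream it back with a download filename) can be read as a small standalone helper. A minimal sketch assuming FastAPI, pandas and xlsxwriter; the helper name and the xlsx MIME type are illustrative, not taken from the repo:

    from datetime import date
    from io import BytesIO
    from urllib.parse import quote

    import pandas as pd
    from fastapi.responses import StreamingResponse

    def excel_response(df: pd.DataFrame, filename_prefix: str) -> StreamingResponse:
        bio = BytesIO()
        with pd.ExcelWriter(bio, engine='xlsxwriter') as writer:  # closing the writer finalizes the workbook
            df.to_excel(writer, index=False)
        bio.seek(0)  # rewind so StreamingResponse reads from the beginning
        filename = f"{quote(filename_prefix)}{date.today():%Y-%m-%d}.xlsx"
        headers = {"content-disposition": f"attachment;filename={filename}"}
        return StreamingResponse(
            bio,
            media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            headers=headers,
        )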