import hashlib
import os
import sys
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from datetime import date
from datetime import timedelta
from io import BytesIO
from typing import Optional, List
from urllib.parse import quote

import pandas as pd
import uvicorn
import yaml
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from loguru import logger
from pydantic import BaseModel, Field

curr_path = os.path.abspath(os.path.dirname(__file__))
project_root_path = curr_path[:curr_path.find("video_to_word") + len("video_to_word")]
sys.path.append(project_root_path)

from asr_client import send_asr_request, send_task_request
from common_func import get_db_engine
from config.url_and_db import toutiao_static_video_url, ai_word_engine
from database import insert, update, query, Task
from time_task.get_material_and_script_by_query_word import get_material_and_script
 
logger.remove()  # Remove the handler loguru installs on import; keeping it would duplicate every log line.
logger.add("/data/pythonProject/video_to_word/logs/main_server.{time:YYYY-MM-DD}.log",
           rotation="00:00",
           format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
           level="INFO")

with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', encoding='utf-8') as f:
    config = yaml.load(f.read(), Loader=yaml.FullLoader)
    source_name_map = config['source_name_map']

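# Assumed shape of config.yaml (illustrative only -- the real file is not shown here).
# The keys are inferred from how source_name_map is indexed later in this module
# (source_name_map[source_code]['table'] and source_name_map[source_code]['name']);
# the table names below are placeholders, not the actual ones.
#
# source_name_map:
#   1: {name: '内部创意', table: <internal_creative_video_table>}
#   2: {name: '巨量创意', table: <oceanengine_creative_video_table>}
#   3: {name: '开眼快创', table: <kaiyan_creative_video_table>}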
 
threadPool = ThreadPoolExecutor(max_workers=4)

app = FastAPI()

origins = [
    "http://192.168.0.195:9001",
    "http://192.168.6.220:3000",
    "http://192.168.1.34",
    "http://192.168.1.34:8000",
    "http://192.168.1.105",
    "http://192.168.1.105:3000",
    "http://111.206.86.186",
    "http://111.206.86.186:3000",
    "http://adsp.tjyourong.com.cn",
    "http://adsp.tjyourong.com.cn:3000",
    "http://adsp.c-top.com.cn",
    "http://adsp.c-top.com.cn:3000"
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
 
class QueryItem(BaseModel):
    signature: Optional[str] = None
    url: Optional[str] = None


@app.get('/', tags=['back-end task'])
def index():
    return {'message': '你已经正确创建 FastApi 服务!'}
 
@app.post('/asr/task/submit', tags=['back-end task'])
def task_submit(signature: str, url: str):
    json = send_asr_request(url)
    task = Task(signature=signature, task_id=json.Data.TaskId, task_result=json.to_json_string(), task_status=1)
    insert(task)
    return {'code': 0, 'taskId': json.Data.TaskId}

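# Illustrative call (signature and url are query parameters, since the handler declares plain
# str arguments); host/port assume the uvicorn settings at the bottom of this file:
#   curl -X POST "http://localhost:31013/asr/task/submit?signature=<md5>&url=<video_url>"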
 
@app.post('/asr/task/result', tags=['back-end task'])
def task_result(task_id: int):
    json = send_task_request(task_id)
    task = query(None, None, task_id)[0]
    task.task_status = json.Data.Status
    task.task_result = json.to_json_string()
    try:
        if json.Data.Status == 2:
            task.word_text = json.Data.ResultDetail[0].FinalSentence
            task.word_split = json.Data.ResultDetail[0].SliceSentence
            task.word_text_md5 = hashlib.md5(task.word_text.encode('utf-8')).hexdigest()
    except Exception:
        # Extracting the raw text or the word segmentation failed; mark the task with task_status = -1.
        task.task_status = -1
    update(task)
    return {'code': 0, 'status': json.Data.StatusStr}
 
@app.post('/asr/task/list', tags=['back-end task'])
def task_list(task_status: int):
    task = query(None, task_status, None)
    return {'code': 0, 'data': task}
 
class BaseResponse(BaseModel):
    message: str = Field(..., description='消息')
    success: bool = Field(..., description='true or false')
    code: int = Field(..., description='')


class TaskDetail(BaseModel):
    source_name: str = Field('内部创意', description='数据来源名称')
    query_word: str = Field('红包', description='关键词')
    stat_date: str = Field('2021-11-11', description='日期')
    script_num: str = Field('', description='脚本数量')
    task_status: str = Field('执行成功', description='状态')
    number: int = Field(0, description='序号')


class ConfigDetail(BaseModel):
    config_id: str = Field(..., description="脚本配置id")
    query_word_lst: List[str] = Field(..., description="关键词")
    create_time: str = Field(..., description="创建时间")
    operator: str = Field(..., description="创建人")
    number: int = Field(..., description="序号")
    user_id: str = Field(..., description="用户id")


class TaskResponse(BaseResponse):
    total_num: int = Field(0, description="总个数")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页个数")
    config_id: str = Field('', description="脚本配置id")
    result: List[TaskDetail] = Field(..., description="结果详情")


class ConfigResponse(BaseResponse):
    total_num: int = Field(0, description="总个数")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页个数")
    result: List[ConfigDetail] = Field(..., description="结果详情")


class QueryWordItem(BaseModel):
    query_word: str = Field("红包", description="查询词", min_length=1)
    stat_date: str = Field("2021-11-16", description="日期", min_length=10, max_length=10)
    source_code: int = Field(2, description="数据来源编码{1:'内部创意', 2:'巨量创意', 3:'开眼快创'}")


class ScriptConfigLst(BaseModel):
    start_date: Optional[date] = Field(date.today() + timedelta(days=-6), description="开始日期-用于查询")
    end_date: Optional[date] = Field(date.today(), description="结束日期-用于查询")
    search_word: Optional[str] = Field('', description="关键词-用于查询")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页的大小")


class QueryWordTaskInfoLst(BaseModel):
    start_date: Optional[date] = Field(date.today() + timedelta(days=-30), description="开始日期-用于查询")
    end_date: Optional[date] = Field(date.today(), description="结束日期-用于查询")
    search_word: Optional[str] = Field('', description="关键词-用于查询")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页的大小")
    config_id: Optional[str] = Field('', description="脚本配置id")
    source_code: Optional[List[int]] = Field([0], description="数据来源编码{1:'内部创意', 2:'巨量创意', 3:'开眼快创'}")


class AddScriptConfig(BaseModel):
    query_word_lst: List[str] = Field(..., description="关键词组")
    operator: str = Field(..., description="操作者")
    user_id: str = Field(..., description="user_id")

    class Config:
        schema_extra = {
            "example": {
                "query_word_lst": ["红包", "淘特"],
                "operator": "龙猫",
                "user_id": "234d46d1873f4dac85b2a2f9ad541e18"
            }
        }
 
@app.post('/export_script_file/', tags=['front-end interactive'],
          description="导出文件",
          summary='导出文件',
          response_model=BaseResponse
          )
@logger.catch
def export_script_file(item: List[QueryWordItem]):
    try:
        video_df = pd.DataFrame()
        out_df = pd.DataFrame()
        # 1. Fetch the video rows from the database.
        # If the same material matches several query words, merge them so the material carries all of its query words.
        for obj in item:
            query_word = obj.query_word
            stat_date = obj.stat_date
            source_code = obj.source_code
            sql = f"select signature, video_url, query_word, stat_date, {source_code} source_code from {source_name_map[source_code]['table']} " \
                  f"where query_word = '{query_word}' " \
                  f"and stat_date = '{stat_date}'"
            df = pd.read_sql(sql, ai_word_engine)
            video_df = video_df.append(df)
        if not video_df.empty:
            # Deduplicate on 'signature' + 'query_word' + 'stat_date' + 'source_code'.
            video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source_code'], keep='last', inplace=True)
            video_query_word_df = video_df.groupby('signature').apply(lambda x: pd.Series({'query_word_lst': x['query_word'].unique(),
                                                                                           'video_url': x['video_url'].values[0],
                                                                                           'source_code': x['source_code'].values[0]}))
            video_query_word_df.reset_index(inplace=True, drop=False)
            # For source_code == 2 (Toutiao / Oceanengine), replace the video link with a permanent static URL.
            video_query_word_df['video_url'] = video_query_word_df.apply(
                lambda row: toutiao_static_video_url + row['signature'] if row.get('source_code') == 2 else row['video_url'], axis=1)
            # 2. Fetch the scripts (transcripts) for the videos collected in step 1.
            # Duplicate a single signature so that str(tuple(...)) yields valid SQL (a 1-tuple would leave a trailing comma).
            signature_lst = list(video_query_word_df.signature.values) if len(video_query_word_df.signature.values) > 1 \
                else list(video_query_word_df.signature.values) * 2
            sql = f"select signature, word_text from tb_asr_result where signature in {tuple(signature_lst)} " \
                  f"and word_text is not null"
            script_df = pd.read_sql(sql, ai_word_engine)
            out_df = video_query_word_df.merge(script_df, on='signature', how='inner')
        # 3. Stream the result back as an Excel file.
        if not out_df.empty:
            bio = BytesIO()
            writer = pd.ExcelWriter(bio, engine='xlsxwriter')
            out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False)
            writer.save()
            bio.seek(0)
            # Build the response headers.
            now_date = date.today().strftime('%Y-%m-%d')
            headers = {"content-type": "application/vnd.ms-excel",
                       "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx"
                       }
            logger.info(f"request body: {item}, message: 数据导出成功")
            return StreamingResponse(bio, media_type='xlsx', headers=headers)
        else:
            logger.info(f"request body: {item}, message: 没有获取到对应的数据")
            return {"code": 0,
                    "message": "没有获取到对应的数据",
                    "success": True}
    except Exception:
        logger.error(f"request body: {item}, message: {traceback.format_exc()}")
        return {"code": -1,
                "message": traceback.format_exc(),
                "success": False}

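# Illustrative client-side usage for /export_script_file/ (assumes the service runs on
# localhost:31013; the payload fields follow QueryWordItem above):
#
#   import requests
#   payload = [{"query_word": "红包", "stat_date": "2021-11-16", "source_code": 2}]
#   resp = requests.post("http://localhost:31013/export_script_file/", json=payload)
#   if resp.headers.get("content-type", "").startswith("application/vnd.ms-excel"):
#       with open("script_export.xlsx", "wb") as fh:
#           fh.write(resp.content)
#   else:
#       print(resp.json())  # e.g. {"code": 0, "message": "没有获取到对应的数据", "success": True}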
 
@app.post('/get_script_config_lst/', tags=['front-end interactive'], response_model=ConfigResponse,
          description="脚本配置列表",
          summary='脚本配置列表'
          )
@logger.catch
def get_script_config_lst(item: ScriptConfigLst):
    try:
        end_date = item.end_date + timedelta(days=1)
        org_df = pd.DataFrame()
        sql = f"select * from ctop_ai_script_query_word_config where config_id in " \
              f"(select distinct(config_id) config_id  from ctop_ai_script_query_word_config " \
              f"where start_time >= '{item.start_date}' and start_time < '{end_date}' " \
              f"and ('{item.search_word}' = '' or query_word like '%%{item.search_word}%%') ) "
        org_df = pd.read_sql(sql, ai_word_engine)
        if not org_df.empty:
            g_df = org_df.groupby('config_id').apply(lambda x: pd.Series({'query_word_lst': list(x['query_word'].unique()),
                                                                          'operator': x['operator'].min(),
                                                                          'create_time': str(x['start_time'].min()),
                                                                          'user_id': x['user_id'].min()}))
            g_df.reset_index(drop=False, inplace=True)
            g_df.sort_values(by='create_time', ascending=False, inplace=True)
            g_df['number'] = list(range(1, len(g_df) + 1))
            total_num = g_df.shape[0]
            detail = g_df.iloc[(item.page_num - 1) * item.page_size: item.page_num * item.page_size].to_dict('records')
            response = {'code': 0,
                        "message": "查询成功",
                        "success": True,
                        "result": detail,
                        "total_num": total_num,
                        "page_num": item.page_num,
                        "page_size": item.page_size}
            logger.info(f"request body: {item}, response body: {response}")
            return response
        else:
            response = {'code': 0,
                        "message": "没有符合条件的数据",
                        "success": True,
                        "result": [],
                        "total_num": 0,
                        "page_num": item.page_num,
                        "page_size": item.page_size}
            logger.info(f"request body: {item}, response body: {response}")
            return response
    except Exception:
        response = {"code": -1,
                    "message": traceback.format_exc(),
                    "success": False,
                    "result": []}
        logger.error(f"request body: {item}, response body: {response}")
        return response

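# Illustrative request body for /get_script_config_lst/ (field names follow ScriptConfigLst
# above; the dates are examples only):
#   {"start_date": "2021-11-10", "end_date": "2021-11-16", "search_word": "", "page_num": 1, "page_size": 10}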
 
@app.post('/get_query_word_task_info_lst/', tags=['front-end interactive'], response_model=TaskResponse,
          description="脚本数据导出列表",
          summary='脚本数据导出列表'
          )
@logger.catch
def get_query_word_task_info_lst(item: QueryWordTaskInfoLst):
    try:
        end_date = item.end_date + timedelta(days=1)
        # Duplicate a single element so that str(tuple(...)) yields valid SQL (a 1-tuple would leave a trailing comma).
        source_code_lst = item.source_code * 2 if len(item.source_code) == 1 else item.source_code
        df = pd.DataFrame()
        if item.config_id != '':
            sql = f"select distinct(query_word) query_word from ctop_ai_script_query_word_config where config_id = '{item.config_id}'"
            query_word_lst = list(pd.read_sql(sql, ai_word_engine).query_word.values)
            if len(query_word_lst) > 0:
                query_word_lst = query_word_lst * 2 if len(query_word_lst) == 1 else query_word_lst
                sql = f"select * from ctop_ai_query_word_task_record where query_word in {tuple(query_word_lst)} " \
                      f"and stat_date >= '{item.start_date}' and stat_date < '{end_date}' " \
                      f"and ('{item.source_code}' = '[0]' or source_code in {tuple(source_code_lst)}) " \
                      f"and ('{item.search_word}' = '' or query_word = '{item.search_word}')"
                df = pd.read_sql(sql, ai_word_engine)
        else:
            sql = f"select * from ctop_ai_query_word_task_record where " \
                  f"stat_date >= '{item.start_date}' and stat_date < '{end_date}' " \
                  f"and ('{item.source_code}' = '[0]' or source_code in {tuple(source_code_lst)}) " \
                  f"and ('{item.search_word}' = '' or query_word = '{item.search_word}')"
            df = pd.read_sql(sql, ai_word_engine)
        if not df.empty:
            df['source_name'] = df['source_code'].apply(lambda x: source_name_map[x]['name'])
            df = df[['source_name', 'query_word', 'stat_date', 'script_num', 'task_status']]
            df.sort_values(['stat_date', 'source_name', 'query_word'], ascending=False, inplace=True)
            df['number'] = list(range(1, len(df) + 1))
            # Cast script_num to a nullable integer and then to str, so that np.nan values do not fail response_model validation.
            df['script_num'] = df['script_num'].astype(pd.Int64Dtype())
            df['script_num'] = df['script_num'].astype(str)
            df.replace('<NA>', '', inplace=True)
            total_num = df.shape[0]
            detail = df.iloc[(item.page_num - 1) * item.page_size: item.page_num * item.page_size].to_dict('records')
            response = {'code': 0,
                        "message": "查询成功",
                        "success": True,
                        "result": detail,
                        "total_num": total_num,
                        "page_num": item.page_num,
                        "page_size": item.page_size,
                        "config_id": item.config_id}
            logger.info(f"request body: {item}, response body: {response}")
            return response
        else:
            response = {'code': 0,
                        "message": "没有符合条件的数据",
                        "success": True,
                        "result": [],
                        "total_num": 0,
                        "page_num": item.page_num,
                        "page_size": item.page_size,
                        "config_id": item.config_id}
            logger.info(f"request body: {item}, response body: {response}")
            return response
    except Exception:
        response = {"code": -1,
                    "message": traceback.format_exc(),
                    "success": False,
                    "result": []}
        logger.error(f"request body: {item}, response body: {response}")
        return response
 
@app.post('/add_script_config/',
          tags=['front-end interactive'],
          description="新增脚本配置",
          summary='新增脚本配置',
          response_model=BaseResponse)
@logger.catch
def add_script_config(item: AddScriptConfig):
    try:
        # Split the configuration into one record per query word.
        config_id = str(uuid.uuid4())
        config_df = pd.DataFrame(data=item.query_word_lst, columns=['query_word'])
        config_df['config_id'] = config_id
        config_df['operator'] = item.operator
        config_df['operate_type'] = 1
        config_df['user_id'] = item.user_id
        # Insert the new configuration records into ctop_ai_script_query_word_config.
        config_df.to_sql(name="ctop_ai_script_query_word_config",
                         con=ai_word_engine,
                         if_exists='append',
                         index=False)
        logger.info(f"request body: {item}, code:0, message: add_script_config success")
        return {"code": 0,
                "message": "add success",
                "success": True}
    except Exception:
        logger.error(f"request body: {item}, code:-1, message: add_script_config fail {traceback.format_exc()}")
        return {"code": -1,
                "message": traceback.format_exc(),
                "success": False}
 
@app.post('/get_material_and_script_time_task/',
          response_model=BaseResponse,
          tags=['back-end task'],
          description="获取素材和脚本任务",
          summary='获取素材和脚本任务')
@logger.catch
def get_material_and_script_time_task():
    try:
        get_material_and_script()
        logger.info(f"{date.today().strftime('%Y-%m-%d')}, 获取素材和脚本任务执行完成.")
        return {"code": 0,
                "success": True,
                "message": f"{date.today().strftime('%Y-%m-%d')},获取素材和脚本任务执行完成."}
    except Exception:
        logger.error(f"{date.today().strftime('%Y-%m-%d')}, 获取素材和脚本任务执行发生异常. {traceback.format_exc()}")
        return {"code": -1,
                "success": False,
                "message": f"{date.today().strftime('%Y-%m-%d')},获取素材和脚本任务执行发生异常. {traceback.format_exc()}"}
 
if __name__ == '__main__':
    uvicorn.run(app='main:app', host="0.0.0.0", port=31013, reload=True, debug=True)

# Production start command: gunicorn main:app -w 4 -k uvicorn.workers.UvicornWorker
 
 