import datetime
import hashlib
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from typing import List, Optional
from urllib.parse import quote

import pandas as pd
import uvicorn
import yaml
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field

from asr_client import send_asr_request, send_task_request
from common_func import get_db_engine
from config.url import toutiao_static_video_url
from database import insert, update, query, Task

with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', encoding='utf-8') as f:
    config = yaml.load(f.read(), Loader=yaml.FullLoader)

source_name_map = config['source_name_map']
ai_word_engine = get_db_engine(config['ai_word_dev_db'])

threadPool = ThreadPoolExecutor(max_workers=4)

app = FastAPI()

origins = [
    "http://192.168.1.34",
    "http://192.168.1.34:8000",
    "http://192.168.1.105",
    "http://192.168.1.105:3000",
    "http://111.206.86.186",
    "http://111.206.86.186:3000",
    "http://adsp.tjyourong.com.cn",
    "http://adsp.tjyourong.com.cn:3000",
    "http://adsp.c-top.com.cn",
    "http://adsp.c-top.com.cn:3000",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class QueryItem(BaseModel):
    signature: Optional[str] = None
    url: Optional[str] = None


@app.get('/')
def index():
    return {'message': 'You have successfully created the FastAPI service!'}


@app.post('/asr/task/submit')
def task_submit(signature: str, url: str):
    # Submit the video URL to the ASR service and persist the returned task.
    resp = send_asr_request(url)
    task = Task(signature=signature, task_id=resp.Data.TaskId,
                task_result=resp.to_json_string(), task_status=1)
    insert(task)
    return {'code': 0, 'taskId': resp.Data.TaskId}


@app.post('/asr/task/result')
def task_result(task_id: int):
    # Poll the ASR service for the task status and update the stored record.
    resp = send_task_request(task_id)
    task = query(None, None, task_id)[0]
    task.task_status = resp.Data.Status
    task.task_result = resp.to_json_string()
    try:
        if resp.Data.Status == 2:  # 2 means recognition finished successfully
            task.word_text = resp.Data.ResultDetail[0].FinalSentence
            task.word_split = resp.Data.ResultDetail[0].SliceSentence
            task.word_text_md5 = hashlib.md5(task.word_text.encode('utf-8')).hexdigest()
    except Exception:
        # Extracting the transcript text or the word segmentation failed,
        # so mark the task as failed by setting task_status to -1.
        task.task_status = -1
    update(task)
    return {'code': 0, 'status': resp.Data.StatusStr}


@app.post('/asr/task/list')
def task_list(task_status: int):
    tasks = query(None, task_status, None)
    return {'code': 0, 'data': tasks}


class QueryWordItem(BaseModel):
    query_word: str = Field(..., description="query word", min_length=1)
    stat_date: str = Field(..., description="date (YYYY-MM-DD)", min_length=10, max_length=10)
    source: int = Field(..., description="source")


@app.post('/export_excel/')
def export_excel(item: List[QueryWordItem]):
    # Step 1: fetch the video data from the database. The merge logic below
    # handles one or many items alike: if the same asset (signature) matches
    # several query words, it becomes a single row tagged with all of them.
    frames = []
    for obj in item:
        query_word = obj.query_word
        stat_date = obj.stat_date
        source = obj.source
        # NOTE: values are interpolated directly into the SQL text; they come
        # from a validated pydantic model, but bind parameters would be safer
        # for untrusted input.
        sql = f"select signature, video_url, query_word, stat_date, {source} source " \
              f"from {source_name_map[source]['table']} " \
              f"where query_word = '{query_word}' " \
              f"and stat_date = '{stat_date}'"
        frames.append(pd.read_sql(sql, ai_word_engine))
    video_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

    video_query_word_df = pd.DataFrame()
    if not video_df.empty:
        # Deduplicate by 'signature' + 'query_word' + 'stat_date' + 'source'.
        video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source'],
                                 keep='last', inplace=True)
        g = video_df.groupby('signature')
        query_word_lst_df = g.apply(lambda x: x['query_word'].unique())
        query_word_lst_df.name = 'query_word_lst'
        url_df = g.apply(lambda x: x['video_url'].values[0])
        url_df.name = 'video_url'
        source_df = g.apply(lambda x: x['source'].values[0])
        source_df.name = 'source'
        video_query_word_df = pd.concat([query_word_lst_df, url_df, source_df], axis=1)
        video_query_word_df.reset_index(inplace=True, drop=False)
        # Source 2 stores only the signature, so rebuild its static video URL.
        video_query_word_df['video_url'] = video_query_word_df.apply(
            lambda row: toutiao_static_video_url + row['signature'] if row['source'] == 2 else row['video_url'],
            axis=1)

    # Step 2: fetch the transcripts for the videos collected in step 1.
    out_df = pd.DataFrame()
    if not video_query_word_df.empty:
        # Build the IN clause explicitly so a single signature does not render
        # as the invalid one-element Python tuple "('sig',)".
        in_clause = ', '.join(f"'{s}'" for s in video_query_word_df['signature'])
        sql = f"select signature, word_text from tb_asr_result " \
              f"where signature in ({in_clause}) and task_status = 2"
        script_df = pd.read_sql(sql, ai_word_engine)
        out_df = video_query_word_df.merge(script_df, on='signature', how='inner')

    # Step 3: stream the result back as an Excel workbook.
    if not out_df.empty:
        bio = BytesIO()
        with pd.ExcelWriter(bio, engine='xlsxwriter') as writer:
            out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False)
        bio.seek(0)
        # Assemble the response headers; the filename is percent-encoded
        # because it contains non-ASCII characters.
        now_date = datetime.date.today().strftime('%Y-%m-%d')
        headers = {
            "content-type": "application/vnd.ms-excel",
            "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx",
        }
        return StreamingResponse(bio, media_type='application/vnd.ms-excel', headers=headers)
    return None


if __name__ == '__main__':
    # test_items = [{'query_word': '红包', 'stat_date': '2021-10-28', 'source': 2},
    #               {'query_word': '红包', 'stat_date': '2021-10-28', 'source': 3},
    #               {'query_word': '赚钱', 'stat_date': '2021-10-28', 'source': 2},
    #               {'query_word': '赚钱', 'stat_date': '2021-10-28', 'source': 3}]
    # export_excel(test_items)
    uvicorn.run(app='main:app', host="0.0.0.0", port=31013, reload=True)
    # gunicorn main:app -w 4 -k uvicorn.workers.UvicornWorker  # production launch command
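
# A minimal usage sketch for the export endpoint, kept commented out in the
# same style as the test items above. It assumes the service is reachable at
# http://127.0.0.1:31013 (the port configured in this file) and that the
# `requests` package is installed; neither is guaranteed by this module.
#
# import requests
#
# items = [{'query_word': '红包', 'stat_date': '2021-10-28', 'source': 2},
#          {'query_word': '赚钱', 'stat_date': '2021-10-28', 'source': 3}]
# resp = requests.post('http://127.0.0.1:31013/export_excel/', json=items)
# # An empty result returns a JSON null body instead of a spreadsheet,
# # so check the content type before writing the file.
# if resp.ok and resp.headers.get('content-type', '').startswith('application/vnd.ms-excel'):
#     with open('export.xlsx', 'wb') as f:
#         f.write(resp.content)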