import datetime
import hashlib
import uuid
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from typing import List, Optional
from urllib.parse import quote

import pandas as pd
import pymysql
import uvicorn
import yaml
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field

from asr_client import send_asr_request, send_task_request
from common_func import get_db_engine, mysql_replace_into
from config.url import toutiao_static_video_url
from database import insert, update, query, Task

with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', encoding='utf-8') as f:
    config = yaml.load(f.read(), Loader=yaml.FullLoader)

source_name_map = config['source_name_map']
ai_word_engine = get_db_engine(config['ai_word_dev_db'])
threadPool = ThreadPoolExecutor(max_workers=4)
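
# Assumed structure of config.yaml, inferred from how its keys are used in this file (not confirmed elsewhere):
#   ai_word_dev_db:  connection settings passed to both get_db_engine() and pymysql.connect(**...),
#                    so its keys must match pymysql's connect arguments (host, port, user, password, ...)
#   source_name_map: maps each integer source code to its metadata; at least a 'table' entry is required,
#                    because /export_excel/ builds its SQL from source_name_map[source]['table']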

app = FastAPI()

origins = [
    "http://192.168.1.34",
    "http://192.168.1.34:8000",
    "http://192.168.1.105",
    "http://192.168.1.105:3000",
    "http://111.206.86.186",
    "http://111.206.86.186:3000",
    "http://adsp.tjyourong.com.cn",
    "http://adsp.tjyourong.com.cn:3000",
    "http://adsp.c-top.com.cn",
    "http://adsp.c-top.com.cn:3000",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Request model (currently not referenced by any route); assumed to be intended as a pydantic model.
class QueryItem(BaseModel):
    signature: Optional[str] = None
    url: Optional[str] = None


@app.get('/')
def index():
    return {'message': 'You have successfully created the FastAPI service!'}


@app.post('/asr/task/submit')
def task_submit(signature: str, url: str):
    json = send_asr_request(url)
    task = Task(signature=signature, task_id=json.Data.TaskId, task_result=json.to_json_string(), task_status=1)
    insert(task)
    return {'code': 0, 'taskId': json.Data.TaskId}


@app.post('/asr/task/result')
def task_result(task_id: int):
    json = send_task_request(task_id)
    task = query(None, None, task_id)[0]
    task.task_status = json.Data.Status
    task.task_result = json.to_json_string()
    try:
        if json.Data.Status == 2:
            task.word_text = json.Data.ResultDetail[0].FinalSentence
            task.word_split = json.Data.ResultDetail[0].SliceSentence
            task.word_text_md5 = hashlib.md5(task.word_text.encode('utf-8')).hexdigest()
    except Exception:
        # Extracting the raw text and the word segmentation failed: mark the task as failed (-1).
        task.task_status = -1
    update(task)
    return {'code': 0, 'status': json.Data.StatusStr}


@app.post('/asr/task/list')
def task_list(task_status: int):
    tasks = query(None, task_status, None)
    return {'code': 0, 'data': tasks}
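
# Illustrative calls (host/port taken from the uvicorn settings at the bottom of this file; values are placeholders).
# Because the handlers above declare plain scalar parameters, FastAPI expects them as query parameters:
#   curl -X POST 'http://127.0.0.1:31013/asr/task/submit?signature=<md5>&url=<video_url>'
#   curl -X POST 'http://127.0.0.1:31013/asr/task/result?task_id=<task_id>'
#   curl -X POST 'http://127.0.0.1:31013/asr/task/list?task_status=2'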


class QueryWordItem(BaseModel):
    query_word: str = Field(..., description="query word", min_length=1)
    stat_date: str = Field(..., description="date, YYYY-MM-DD", min_length=10, max_length=10)
    source: int = Field(..., description="data source code (a key of source_name_map)")


@app.post('/export_excel/')
def export_excel(item: List[QueryWordItem]):
    # 1 Fetch the video records from the database.
    #   If the same material (signature) was matched by several query words they are merged below;
    #   a single entry goes through exactly the same path as multiple entries.
    video_df = pd.DataFrame()
    for obj in item:
        query_word = obj.query_word
        stat_date = obj.stat_date
        source = obj.source
        sql = f"select signature, video_url, query_word, stat_date, {source} source from {source_name_map[source]['table']} " \
              f"where query_word = '{query_word}' " \
              f"and stat_date = '{stat_date}'"
        df = pd.read_sql(sql, ai_word_engine)
        video_df = pd.concat([video_df, df], ignore_index=True)

    out_df = pd.DataFrame()
    if not video_df.empty:
        # Deduplicate on 'signature' + 'query_word' + 'stat_date' + 'source'.
        video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source'], keep='last', inplace=True)
        g = video_df.groupby('signature')
        query_word_lst_df = g.apply(lambda x: x['query_word'].unique())
        query_word_lst_df.name = 'query_word_lst'
        url_df = g.apply(lambda x: x['video_url'].values[0])
        url_df.name = 'video_url'
        source_df = g.apply(lambda x: x['source'].values[0])
        source_df.name = 'source'
        video_query_word_df = pd.concat([query_word_lst_df, url_df, source_df], axis=1)
        video_query_word_df.reset_index(inplace=True, drop=False)
        video_query_word_df['video_url'] = video_query_word_df.apply(
            lambda row: toutiao_static_video_url + row['signature'] if row.get('source') == 2 else row['video_url'], axis=1)

        # 2 Fetch the recognized scripts for the videos found in step 1.
        #   A single signature is duplicated so the rendered IN (...) clause never ends with a trailing comma.
        signatures = video_query_word_df['signature'].tolist()
        sql = f"select signature, word_text from tb_asr_result where signature in " \
              f"{tuple(signatures) if len(signatures) > 1 else tuple(signatures * 2)} " \
              f"and task_status = 2"
        script_df = pd.read_sql(sql, ai_word_engine)
        out_df = video_query_word_df.merge(script_df, on='signature', how='inner')

    # 3 Return the result as a streamed Excel file.
    if not out_df.empty:
        bio = BytesIO()
        writer = pd.ExcelWriter(bio, engine='xlsxwriter')
        out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False)
        writer.close()
        bio.seek(0)
        # Build the response headers; the filename is URL-quoted because it contains Chinese characters.
        now_date = datetime.date.today().strftime('%Y-%m-%d')
        headers = {"content-type": "application/vnd.ms-excel",
                   "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx"}
        return StreamingResponse(bio, media_type='application/vnd.ms-excel', headers=headers)
    return None
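
# Illustrative request body for /export_excel/ (values borrowed from the commented-out test data at the
# bottom of this file; 'source' must be a key of source_name_map):
# [
#     {"query_word": "红包", "stat_date": "2021-10-28", "source": 2},
#     {"query_word": "红包", "stat_date": "2021-10-28", "source": 3}
# ]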


class ScriptConfig(BaseModel):
    query_word_lst: List[str] = Field(..., description="list of query words")
    operator: str = Field(..., description="operator")


@app.post('/get_script_config_lst/')
def get_script_config_lst():
    # TODO: not implemented yet.
    pass


@app.post('/add_script_config/')
def add_script_config(item: ScriptConfig):
    config_id = str(uuid.uuid4())
    config_lst = []
    for query_word in item.query_word_lst:
        sql = f"select * from ctop_ai_query_word where query_word = '{query_word}'"
        query_word_df = pd.read_sql(sql, ai_word_engine)
        if not query_word_df.empty:
            # The query word already exists: bump its script_config_conn_num in ctop_ai_query_word.
            query_word_id = query_word_df.query_word_id.values[0]
            script_config_conn_num = query_word_df.script_config_conn_num.values[0] + 1
            db_con = pymysql.connect(**config['ai_word_dev_db'])
            db_cur = db_con.cursor()
            sql = f"update ctop_ai_query_word set script_config_conn_num = {script_config_conn_num} where query_word_id = '{query_word_id}'"
            db_cur.execute(sql)
            db_con.commit()
            db_con.close()
            # Alternative kept from the original code: upsert via pandas + mysql_replace_into.
            # update_query_word_df = pd.DataFrame([{"query_word_id": query_word_id,
            #                                       "query_word": query_word,
            #                                       "script_conn_num": script_conn_num}])
            # update_query_word_df.to_sql(name="ctop_ai_query_word",
            #                             con=ai_word_engine,
            #                             if_exists="append",
            #                             method=mysql_replace_into,
            #                             index=False)
        else:
            # New query word: create it with an initial connection count of 1.
            query_word_id = str(uuid.uuid4())
            new_query_word_df = pd.DataFrame([{"query_word_id": query_word_id, "query_word": query_word, "script_config_conn_num": 1}])
            new_query_word_df.to_sql(name="ctop_ai_query_word",
                                     con=ai_word_engine,
                                     if_exists="append",
                                     index=False)
        config_lst.append({"config_id": config_id, "query_word_id": query_word_id})

    # Insert the new config records into ctop_ai_script_query_word_config.
    config_df = pd.DataFrame(config_lst)
    config_df['operator'] = item.operator
    config_df['operate_type'] = 1
    config_df.to_sql(name="ctop_ai_script_query_word_config",
                     con=ai_word_engine,
                     if_exists='append',
                     index=False)
    return {"code": 0, "message": "success"}


if __name__ == '__main__':
    # Local debugging: the commented-out block below exercises export_excel with sample data.
    # test_items = [{'query_word': '红包', 'stat_date': '2021-10-28', 'source': 2},
    #               {'query_word': '红包', 'stat_date': '2021-10-28', 'source': 3},
    #               {'query_word': '赚钱', 'stat_date': '2021-10-28', 'source': 2},
    #               {'query_word': '赚钱', 'stat_date': '2021-10-28', 'source': 3}]
    # export_excel(test_items)
    uvicorn.run(app='main:app', host="0.0.0.0", port=31013, reload=True)
    # Production startup command:
    # gunicorn main:app -w 4 -k uvicorn.workers.UvicornWorker