import os
import sys
import traceback
import uuid
from datetime import date, datetime, timedelta
from io import BytesIO
from typing import Optional, List
from urllib.parse import quote

import pandas as pd
import yaml
from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from loguru import logger
from pangres import upsert
from pydantic import BaseModel, Field
from sqlalchemy import VARCHAR

curr_path = os.path.abspath(os.path.dirname(__file__))
project_root_path = curr_path[:curr_path.find("video_to_word") + len("video_to_word")]
sys.path.append(project_root_path)

from config.url_and_db import toutiao_static_video_url, ai_word_engine
from time_task.get_material_and_script_by_query_word import get_material_and_script

router = APIRouter(tags=['script_config_server'])

with open('/data/pythonProject/video_to_word/config/config.yaml', mode='r', encoding='utf-8') as f:
    config = yaml.load(f.read(), Loader=yaml.FullLoader)
    source_name_map = config['source_name_map']
class BaseResponse(BaseModel):
    message: str = Field(..., description='消息')
    success: bool = Field(True, description='true or false')
    code: int = Field(0, description='')


class TaskDetail(BaseModel):
    source_name: str = Field('内部创意', description='数据来源名称')
    query_word: str = Field('红包', description='关键词')
    stat_date: str = Field('2021-11-11', description='日期')
    script_num: str = Field('', description='脚本数量')
    task_status: str = Field('执行成功', description='状态')
    number: int = Field(0, description='序号')


class ConfigDetail(BaseModel):
    config_id: str = Field(..., description="脚本配置id")
    query_word: List[str] = Field(..., description="关键词")
    recommended_word: List[str] = Field([""], description="推荐词")
    create_time: str = Field(..., description="创建时间")
    operator: str = Field(..., description="创建人")
    number: int = Field(..., description="序号")
    user_id: str = Field(..., description="用户id")


class TaskResponse(BaseResponse):
    total_num: int = Field(0, description="总个数")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页个数")
    config_id: str = Field('', description="脚本配置id")
    result: List[TaskDetail] = Field([], description="结果详情")


class ConfigResponse(BaseResponse):
    total_num: int = Field(0, description="总个数")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页个数")
    result: List[ConfigDetail] = Field([], description="结果详情")
class QueryWordItem(BaseModel):
    query_word: str = Field("红包", description="查询词", min_length=1)
    stat_date: str = Field("2021-11-16", description="日期", min_length=10, max_length=10)
    source_code: int = Field(2, description="数据来源编码{1:'内部创意', 2:'巨量创意', 3:'开眼快创'}")


class ScriptConfigLst(BaseModel):
    start_date: Optional[date] = Field(date.today() + timedelta(days=-29), description="开始日期-用于查询")
    end_date: Optional[date] = Field(date.today(), description="结束日期-用于查询")
    search_word: Optional[str] = Field('', description="关键词/推荐词-用于查询")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页的大小")


class QueryWordTaskInfoLst(BaseModel):
    start_date: Optional[date] = Field(date.today() + timedelta(days=-30), description="开始日期-用于查询")
    end_date: Optional[date] = Field(date.today(), description="结束日期-用于查询")
    search_word: Optional[str] = Field('', description="关键词/推荐词-用于查询")
    page_num: int = Field(1, description="第几页")
    page_size: int = Field(10, description="每页的大小")
    config_id: Optional[str] = Field('', description="脚本配置id")
    source_code: Optional[List[int]] = Field([], description="数据来源编码{1:'内部创意', 2:'巨量创意', 3:'开眼快创'}")


class QueryWordAndRecommendedWordPair(BaseModel):
    query_word: str = Field(..., description='关键字')
    recommended_word: List[str] = Field([], description='推荐词')


class AddScriptConfig(BaseModel):
    query_word_pair: List[QueryWordAndRecommendedWordPair] = Field(..., description="关键词-推荐词")
    operator: str = Field(..., description="操作者")
    user_id: str = Field(..., description="user_id")

    class Config:
        schema_extra = {
            "example": {
                "query_word_pair": [{"query_word": "水蜜桃", "recommended_word": ["我等你", "陌陌", "寻爱", "聊吧"]}],
                "operator": "管理员",
                "user_id": "e9ca23d68d884d4ebb19d07889727dae"
            }
        }


class DeleteScriptConfig(BaseModel):
    config_id: str = Field(..., description="脚本配置id")
    operator: str = Field(..., description="操作者")
    user_id: str = Field(..., description="user_id")

    class Config:
        schema_extra = {
            "example": {
                "config_id": "71951bcb-0ef7-4ce0-9be5-c8aaf3128ab7",
                "operator": "管理员",
                "user_id": "e9ca23d68d884d4ebb19d07889727dae"
            }
        }
@logger.catch
@router.post('/export_script_file/',
             description="导出文件",
             summary='导出文件',
             response_model=BaseResponse)
def export_script_file(item: List[QueryWordItem]):
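    """Export the scripts of the selected materials as an Excel attachment.

    For each (query_word, stat_date, source_code) item, video records are read
    from the corresponding source table, de-duplicated by signature, joined with
    their ASR scripts from tb_asr_result, and streamed back as an .xlsx file.
    """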
    try:
        logger.info(f"request body: {item}")
        video_df = pd.DataFrame()
        out_df = pd.DataFrame()
        # 1. Fetch the video data from the database.
        # If the same material matches several query words, all of those query words are attached to it.
        for obj in item:
            query_word = obj.query_word
            stat_date = obj.stat_date
            source_code = obj.source_code
            sql = f"select signature, video_url, query_word, stat_date, {source_code} source_code " \
                  f"from {source_name_map[source_code]['table']} " \
                  f"where query_word = '{query_word}' " \
                  f"and stat_date = '{stat_date}'"
            df = pd.read_sql(sql, ai_word_engine)
            video_df = video_df.append(df)
        if not video_df.empty:
            # De-duplicate on 'signature' + 'query_word' + 'stat_date' + 'source_code'.
            video_df.drop_duplicates(['signature', 'query_word', 'stat_date', 'source_code'], keep='last', inplace=True)
            video_query_word_df = video_df.groupby('signature').apply(lambda x: pd.Series({'query_word_lst': x['query_word'].unique(),
                                                                                           'video_url': x['video_url'].values[0],
                                                                                           'source_code': x['source_code'].values[0]}))
            video_query_word_df.reset_index(inplace=True, drop=False)
            # If source_code == 2 (头条巨量引擎), replace the video link with the permanent link.
            video_query_word_df['video_url'] = video_query_word_df.apply(
                lambda row: toutiao_static_video_url + row['signature'] if row.get('source_code') == 2 else row['video_url'], axis=1)
            # 2. Fetch the scripts for the videos found in step 1.
            # A single signature is duplicated so that tuple() always renders a valid SQL in-list.
            signature_lst = list(video_query_word_df.signature.values) if len(video_query_word_df.signature.values) > 1 \
                else list(video_query_word_df.signature.values) * 2
            sql = f"select signature, word_text from tb_asr_result where signature in {tuple(signature_lst)} " \
                  f"and word_text is not null"
            script_df = pd.read_sql(sql, ai_word_engine)
            out_df = video_query_word_df.merge(script_df, on='signature', how='inner')
        # Manually close the database connections.
        ai_word_engine.dispose()
        # 3. Return the data as a streaming response.
        if not out_df.empty:
            bio = BytesIO()
            writer = pd.ExcelWriter(bio, engine='xlsxwriter')
            out_df[['signature', 'query_word_lst', 'word_text', 'video_url']].to_excel(writer, index=False)
            writer.save()
            bio.seek(0)
            # Assemble the response headers.
            now_date = date.today().strftime('%Y-%m-%d')
            headers = {"content-type": "application/vnd.ms-excel",
                       "content-disposition": f"attachment;filename={quote('优质素材脚本_')}{now_date}.xlsx"}
            logger.info(f"request body: {item}, message: 数据导出成功")
            return StreamingResponse(bio, media_type='application/vnd.ms-excel', headers=headers)
        else:
            logger.info(f"request body: {item}, message: 没有获取到对应的数据")
            return {"code": 0,
                    "message": "没有获取到对应的数据",
                    "success": True}
    except:
        logger.error(f"request body: {item}, message: {traceback.format_exc()}")
        return {"code": -1,
                "message": traceback.format_exc(),
                "success": False}
@logger.catch
@router.post('/get_script_config_lst/', response_model=ConfigResponse,
             description="脚本配置列表",
             summary='脚本配置列表')
def get_script_config_lst(item: ScriptConfigLst):
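    """Page through the script query-word configurations.

    Active configurations (operate_type = 1, end_time = '9999-12-31') created in
    the requested date range are grouped by config_id and returned page by page.
    """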
    logger.info(f"request body: {item}")
    response = ConfigResponse(message="查询成功")
    try:
        end_date = item.end_date + timedelta(days=1)
        sql = f"select * from ctop_ai_script_query_word_config where config_id in " \
              f"(select distinct(config_id) config_id from ctop_ai_script_query_word_config " \
              f"where ('{item.search_word}' = '' or query_word like '%%{item.search_word}%%' or recommended_word like '%%{item.search_word}%%')) " \
              f"and operate_type = 1 and end_time = '9999-12-31' " \
              f"and start_time >= '{item.start_date}' and start_time < '{end_date}' "
        org_df = pd.read_sql(sql, ai_word_engine)
        if not org_df.empty:
            g_df = org_df.groupby('config_id').apply(lambda x: pd.Series({'query_word': list(x['query_word'].unique()),
                                                                          'recommended_word': list(x['recommended_word'].unique()),
                                                                          'operator': x['operator'].min(),
                                                                          'create_time': str(x['start_time'].min()),
                                                                          'user_id': x['user_id'].min()}))
            g_df.reset_index(drop=False, inplace=True)
            g_df.sort_values(by='create_time', ascending=False, inplace=True)
            g_df['number'] = list(range(1, len(g_df) + 1))
            total_num = g_df.shape[0]
            detail = g_df.iloc[(item.page_num - 1) * item.page_size: item.page_num * item.page_size].to_dict('records')
            response.result = detail
            response.total_num = total_num
            response.page_num = item.page_num
            response.page_size = item.page_size
        else:
            response.message = "没有符合条件的数据"
            response.page_num = item.page_num
            response.page_size = item.page_size
        logger.info(f"request body: {item}, response body: {response}")
    except:
        response.code = -1
        response.message = traceback.format_exc()
        response.success = False
        logger.error(f"request body: {item}, response body: {response}")
    return response
@logger.catch
@router.post('/get_query_word_task_info_lst/', response_model=TaskResponse,
             description="脚本数据导出列表",
             summary='脚本数据导出列表')
def get_query_word_task_info_lst(item: QueryWordTaskInfoLst):
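    """Page through the query-word crawl task records.

    Records are filtered by date range, source code and search word; when a
    config_id is given, only tasks for that configuration's query words and
    recommended words are returned.
    """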
    logger.info(f"request body: {item}")
    response = TaskResponse(code=0, message="查询成功", success=True)
    try:
        end_date = item.end_date + timedelta(days=1)
        # A single source code is duplicated so that tuple() renders a valid SQL in-list;
        # when no source code is given, the '[]' check in the SQL bypasses the filter and
        # [-1, -2] acts only as a syntactic placeholder.
        source_code_lst = [-1, -2] if len(item.source_code) == 0 else (item.source_code * 2 if len(item.source_code) == 1 else item.source_code)
        df = pd.DataFrame()
        if item.config_id != '':
            sql = f"select query_word, recommended_word from ctop_ai_script_query_word_config where config_id = '{item.config_id}'"
            config_df = pd.read_sql(sql, ai_word_engine)
            query_word_set = set(config_df['query_word'].values)
            recommended_word_set = set(config_df['recommended_word'].values)
            word_lst = list(query_word_set.union(recommended_word_set))
            if len(word_lst) > 0:
                word_lst = word_lst * 2 if len(word_lst) == 1 else word_lst
                sql = f"select * from ctop_ai_query_word_task_record where query_word in {tuple(word_lst)} " \
                      f"and stat_date >= '{item.start_date}' and stat_date < '{end_date}' " \
                      f"and ('{item.source_code}' = '[]' or source_code in {tuple(source_code_lst)}) " \
                      f"and ('{item.search_word}' = '' or query_word = '{item.search_word}')"
                df = pd.read_sql(sql, ai_word_engine)
        else:
            sql = f"select * from ctop_ai_query_word_task_record where " \
                  f"stat_date >= '{item.start_date}' and stat_date < '{end_date}' " \
                  f"and ('{item.source_code}' = '[]' or source_code in {tuple(source_code_lst)}) " \
                  f"and ('{item.search_word}' = '' or query_word = '{item.search_word}')"
            df = pd.read_sql(sql, ai_word_engine)
        # Manually close the database connections.
        ai_word_engine.dispose()
        if not df.empty:
            df['source_name'] = df['source_code'].apply(lambda x: source_name_map[x]['name'])
            df = df[['source_name', 'query_word', 'stat_date', 'script_num', 'task_status']]
            df.sort_values(['stat_date', 'source_name', 'query_word'], ascending=False, inplace=True)
            df['number'] = list(range(1, len(df) + 1))
            # Cast script_num to a nullable integer and then to str, so that np.nan values
            # do not fail response model validation.
            df['script_num'] = df['script_num'].astype(pd.Int64Dtype())
            df['script_num'] = df['script_num'].astype(str)
            df.replace('<NA>', '', inplace=True)
            total_num = df.shape[0]
            detail = df.iloc[(item.page_num - 1) * item.page_size: item.page_num * item.page_size].to_dict('records')
            response.result = detail
            response.total_num = total_num
            response.page_num = item.page_num
            response.page_size = item.page_size
            response.config_id = item.config_id
        else:
            response.message = "没有符合条件的数据"
            response.page_num = item.page_num
            response.page_size = item.page_size
            response.config_id = item.config_id
        logger.info(f"request body: {item}, response body: {response}")
    except:
        response.message = traceback.format_exc()
        response.code = -1
        response.success = False
        logger.error(f"request body: {item}, response body: {response}")
    return response
@logger.catch
@router.post('/add_script_config/',
             description="新增脚本配置",
             summary='新增脚本配置',
             response_model=BaseResponse)
def add_script_config(item: AddScriptConfig):
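    """Create new script configurations.

    Each query-word / recommended-word pair gets its own config_id, and the
    resulting rows are appended to ctop_ai_script_query_word_config.
    """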
    try:
        logger.info(f"request body: {item}")
        multi_config_df = pd.DataFrame()
        for pair in item.query_word_pair:
            config_id = str(uuid.uuid4())
            config_df = pd.DataFrame({'query_word': pair.query_word,
                                      'recommended_word': [""] if len(pair.recommended_word) == 0 else pair.recommended_word,
                                      'config_id': config_id})
            multi_config_df = multi_config_df.append(config_df)
        multi_config_df['operator'] = item.operator
        multi_config_df['operate_type'] = 1
        multi_config_df['user_id'] = item.user_id
        # Insert the new configuration records into ctop_ai_script_query_word_config.
        multi_config_df.to_sql(name="ctop_ai_script_query_word_config",
                               con=ai_word_engine,
                               if_exists='append',
                               index=False)
        # Manually close the database connections.
        ai_word_engine.dispose()
        logger.info(f"request body: {item}, code:0, message: add_script_config success")
        return {"code": 0,
                "message": "add success",
                "success": True}
    except:
        logger.error(f"request body: {item}, code:-1, message: add_script_config fail {traceback.format_exc()}")
        return {"code": -1,
                "message": traceback.format_exc(),
                "success": False}
@logger.catch
@router.post('/delete_script_config/',
             description="删除脚本配置",
             summary='删除脚本配置',
             response_model=BaseResponse)
def delete_script_config(item: DeleteScriptConfig):
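    """Delete a script configuration without removing its rows physically.

    The existing rows for the config_id get their end_time stamped with the
    current time, and a copy with operate_type = 3 recording the deletion is
    written back via pangres upsert.
    """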
    response = BaseResponse(code=0, message='delete success', success=True)
    try:
        logger.info(f"request body: {item}")
        sql = f"select * from ctop_ai_script_query_word_config where config_id = '{item.config_id}'"
        config_df = pd.read_sql(sql, ai_word_engine)
        if not config_df.empty:
            column_type = {'config_id': VARCHAR(36),
                           'query_word': VARCHAR(36),
                           'recommended_word': VARCHAR(36)}
            # Close out the existing rows by stamping end_time with the current time.
            update_config_df = config_df.copy(deep=True)
            update_config_df['end_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            update_config_df.set_index(['config_id', 'query_word', 'recommended_word', 'operate_type'], drop=True, inplace=True)
            upsert(engine=ai_word_engine,
                   df=update_config_df,
                   table_name='ctop_ai_script_query_word_config',
                   if_row_exists='update',
                   dtype=column_type)
            # Write new operate_type = 3 records for the deletion.
            add_config_df = config_df.copy(deep=True)
            add_config_df['operate_type'] = 3
            add_config_df['user_id'] = item.user_id
            add_config_df['operator'] = item.operator
            add_config_df['start_time'] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
            add_config_df.drop(labels='end_time', axis=1, inplace=True)
            add_config_df.set_index(['config_id', 'query_word', 'recommended_word', 'operate_type'], drop=True, inplace=True)
            upsert(engine=ai_word_engine,
                   df=add_config_df,
                   table_name='ctop_ai_script_query_word_config',
                   if_row_exists='update',
                   dtype=column_type)
        else:
            response.message = f'没有获取到 {item.config_id} 对应的数据'
        # Manually close the database connections.
        ai_word_engine.dispose()
        logger.info(f"request body: {item}, response: {response}")
    except:
        response.code = -1
        response.message = traceback.format_exc()
        response.success = False
        logger.error(f"request body: {item}, response: {response}")
    return response
@logger.catch
@router.post('/get_material_and_script_time_task/',
             response_model=BaseResponse,
             description="获取素材和脚本任务",
             summary='获取素材和脚本任务')
def get_material_and_script_time_task(start_time: Optional[str] = ""):
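    """Trigger the scheduled job that fetches materials and scripts.

    Delegates to get_material_and_script(start_time) and reports success, or the
    traceback if the job fails.
    """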
    try:
        get_material_and_script(start_time)
        logger.info(f"{date.today().strftime('%Y-%m-%d')}, 获取素材和脚本任务执行完成.")
        return {"code": 0,
                "success": True,
                "message": f"{date.today().strftime('%Y-%m-%d')},获取素材和脚本任务执行完成."}
    except:
        logger.error(f"{date.today().strftime('%Y-%m-%d')}, 获取素材和脚本任务执行发生异常. {traceback.format_exc()}")
        return {"code": -1,
                "success": False,
                "message": f"{date.today().strftime('%Y-%m-%d')},获取素材和脚本任务执行发生异常. {traceback.format_exc()}"}
if __name__ == '__main__':
    req = DeleteScriptConfig(config_id='4113ec60-4b92-4a4a-9dc5-417a29df9b65',
                             operator='隋炎均',
                             user_id='f75b91a1a23946688ab1d93a65d0a435')
    delete_script_config(req)