evaluate_script.py

import hashlib
import os
import pickle
import sys
import traceback
import uuid
from datetime import date, datetime, timedelta
from typing import List, Optional

import jieba
import numpy as np
import pandas as pd
import torch
from fastapi import APIRouter
from loguru import logger
from pangres import upsert
from pydantic import BaseModel, Field

curr_path = os.path.abspath(os.path.dirname(__file__))
project_root_path = curr_path[:curr_path.find("video_to_word") + len("video_to_word")]
sys.path.append(project_root_path)

from config.url_and_db import toutiao_static_video_url, ai_word_engine

# Vocabulary and word-to-index mapping produced during model training.
with open('/data/pythonProject/video_to_word/script_score/pkl/vocab.pkl', 'rb') as f:
    vocab = pickle.load(f)

with open('/data/pythonProject/video_to_word/script_score/pkl/word_to_idx.pkl', 'rb') as f:
    word_to_idx = pickle.load(f)

# Min/max of the log-transformed model score, used to map raw scores onto [0, 100].
log_score_min = -19.266664505004883
log_score_max = 0
def split_by_jieba(x):
    # Tokenize with jieba and drop comma tokens.
    return [token for token in jieba.cut(x) if token != ',']
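
# Example (typical jieba segmentation; the exact tokens depend on the loaded
# dictionary):
#   split_by_jieba("今天天气不错") -> ['今天', '天气', '不错']
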
def encode_samples(tokenized_samples):
    # Map each token to its vocabulary index; out-of-vocabulary tokens map to 0.
    features = []
    for sample in tokenized_samples:
        feature = []
        for token in sample[0]:
            if token in word_to_idx:
                feature.append(word_to_idx[token])
            else:
                feature.append(0)
        features.append(feature)
    return features
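
# Example (hypothetical mapping): with word_to_idx = {'天气': 42}, the call
# encode_samples([[['天气', '某个未登录词']]]) returns [[42, 0]]. Note that each
# sample is wrapped in an extra list, matching how get_script_score() calls it.
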
def pad_samples(features, maxlen=113, PAD=0):
    # Truncate or right-pad every feature sequence to exactly `maxlen` entries.
    padded_features = []
    for feature in features:
        if len(feature) >= maxlen:
            padded_feature = feature[:maxlen]
        else:
            padded_feature = feature
            while len(padded_feature) < maxlen:
                padded_feature.append(PAD)
        padded_features.append(padded_feature)
    return padded_features
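
# Examples:
#   pad_samples([[5, 7]], maxlen=4)          -> [[5, 7, 0, 0]]
#   pad_samples([[1, 2, 3, 4, 5]], maxlen=4) -> [[1, 2, 3, 4]]
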
def log_and_map_min_max_score(score):
    # Log transform.
    log_score = np.log(score)
    # Linearly map the log score onto a 0-100 scale.
    script_score = 0 + (100 - 0) / (log_score_max - log_score_min) * (log_score - log_score_min)
    script_score = int(script_score)
    # Map to a grade: 1 = excellent (95, 100], 2 = good (85, 95], 3 = low quality (0, 85].
    script_level = 3 if script_score <= 85 else (2 if script_score <= 95 else 1)
    # Empirical probability that a script in this grade bucket performs well.
    high_quality_prob = 0.1905 if script_score <= 85 else (0.4330 if script_score <= 95 else 0.6916)
    return script_score, script_level, high_quality_prob
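
# Worked example: for a raw model score of 0.5, log_score = ln(0.5) ≈ -0.693, so
# script_score = 100 / 19.2667 * (-0.693 + 19.2667) ≈ 96, which falls in (95, 100]
# -> script_level = 1 (excellent), high_quality_prob = 0.6916.
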
router = APIRouter(tags=['script_score_server'])


class BaseResponse(BaseModel):
    message: str = Field(..., description='message')
    success: bool = Field(True, description='true or false')
    code: int = Field(0, description='')


class GetScriptScoreRequest(BaseModel):
    script: str = Field(..., description="script content")
    user_name: str = Field(..., description="user name")
    user_id: str = Field(..., description="user id")

    class Config:
        schema_extra = {
            "example": {
                "script": "十六块二十六块,只要二十六块包邮到家,这么大一件派克服,现在不要两百,不要一百二十六块就给你包邮到家,真的太划算了,咱们工厂现在为了扩大销售渠道,所以特地拿出一批货在桃树上做活动,这款派克服寒气版型,特别时尚,抽绳收腰设计,修身显瘦,加绒内里还保暖毛领,精致又洋气,喜欢的朋友赶紧点击视频下方链接进入操作即可就可以购买啦。",
                "user_name": "管理员",
                "user_id": "e9ca23d68d884d4ebb19d07889727dae"
            }
        }


class ScriptScoreInfo(BaseModel):
    unique_id: str = Field(..., description="script record id")
    script_text: str = Field(..., description="script content")
    script_score: int = Field(..., description="script score")
    script_level: int = Field(..., description="score grade {1: 'excellent', 2: 'good', 3: 'low quality'}")
    high_quality_prob: float = Field(..., description="probability of performing well")
    script_hash: str = Field(..., description="hash of the script content")
    user_name: str = Field(..., description="user name")
    user_id: str = Field(..., description="user id")
    # default_factory keeps the timestamp fresh per instance; a plain default would
    # be evaluated once at import time.
    start_time: datetime = Field(default_factory=datetime.now, description="timestamp")


class GetScriptScoreLstResponse(BaseResponse):
    total_num: int = Field(0, description="total number of records")
    page_num: int = Field(1, description="page number")
    page_size: int = Field(10, description="records per page")
    result: List[ScriptScoreInfo] = Field([], description="result details")


class GetSingleScriptScore(BaseResponse):
    result: Optional[ScriptScoreInfo] = Field(None, description="result details")


class ScriptScoreLstRequest(BaseModel):
    # default_factory, for the same reason as above: evaluate per request, not at import.
    start_date: Optional[date] = Field(default_factory=lambda: date.today() + timedelta(days=-29),
                                       description="start date for the query")
    end_date: Optional[date] = Field(default_factory=date.today, description="end date for the query")
    search_word: Optional[str] = Field('', description="search keyword")
    user_name: Optional[str] = Field('', description="user name")
    script_level: Optional[List[int]] = Field([], description="score grade {1: 'excellent', 2: 'good', 3: 'low quality'}")
    page_num: int = Field(1, description="page number")
    page_size: int = Field(10, description="page size")


class DeleteScriptScoreRequest(BaseModel):
    unique_id: str = Field(..., description="unique id")
    user_name: str = Field(..., description="user name")
    user_id: str = Field(..., description="user id")

    class Config:
        schema_extra = {
            "example": {
                "unique_id": "89d4d72a-8c7d-4c89-8b65-258d7206cd6b",
                "user_name": "管理员",
                "user_id": "e9ca23d68d884d4ebb19d07889727dae"
            }
        }
@logger.catch()
@router.post("/delete_script_score",
             description="delete a script score record",
             summary="delete a script score record",
             response_model=BaseResponse)
def delete_script_score(item: DeleteScriptScoreRequest):
    response = BaseResponse(code=0, message='delete success', success=True)
    try:
        logger.info(f"request body: {item}")
        sql = f"select * from ctop_ai_script_score where unique_id = '{item.unique_id}' " \
              f"and operate_type = 1 " \
              f"and end_time = '9999-12-31'"
        df = pd.read_sql(sql, ai_word_engine)
        if df.empty:
            response = BaseResponse(code=-2, message='record does not exist', success=False)
            return response

        # Only the creator of a record is allowed to delete it.
        creator_id = df['user_id'].values[0]
        if creator_id != item.user_id:
            response = BaseResponse(code=-1, message='no permission to delete', success=False)
        else:
            # Zipper-table (slowly-changing-dimension) style delete:
            # 1) close out the current record by setting its end_time;
            # 2) write a record with operate_type = 3 that marks the deletion time.
            update_df = df.copy(deep=True)
            update_df['end_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            update_df.set_index('unique_id', drop=True, inplace=True)
            upsert(engine=ai_word_engine,
                   df=update_df,
                   table_name='ctop_ai_script_score',
                   if_row_exists='update')

            add_df = df.copy(deep=True)
            add_df['operate_type'] = 3
            add_df['start_time'] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
            add_df.set_index('unique_id', drop=True, inplace=True)
            upsert(engine=ai_word_engine,
                   df=add_df,
                   table_name='ctop_ai_script_score',
                   if_row_exists='update')
        logger.info(f"request body: {item}, response body: {response}")
    except Exception:
        response.code = -1
        response.message = traceback.format_exc()
        response.success = False
        logger.error(f"request body: {item}, response: {response}")
    return response
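
# Zipper-table sketch (illustrative values, assuming the table key allows rows per
# unique_id to be distinguished over time):
#   before delete: (unique_id=U, operate_type=1, start_time=T0, end_time='9999-12-31')
#   after delete:  (unique_id=U, operate_type=1, start_time=T0, end_time=<now>)           <- closed out
#                  (unique_id=U, operate_type=3, start_time=<now>, end_time='9999-12-31') <- delete marker
# Queries for live records filter on operate_type = 1 and end_time = '9999-12-31'.
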
@logger.catch()
@router.post("/get_script_score_lst",
             description="get the list of script score records",
             summary="get the list of script score records",
             response_model=GetScriptScoreLstResponse)
def get_script_score_lst(item: ScriptScoreLstRequest):
    response = GetScriptScoreLstResponse(message="query succeeded")
    try:
        end_date = item.end_date + timedelta(days=1)
        # Build a list usable in a SQL `in` clause: [-1, -2] is a harmless placeholder
        # when no grade filter is given, and a single grade is duplicated so that
        # tuple() renders as (x, x) rather than the invalid SQL (x,).
        script_level_lst = [-1, -2] if len(item.script_level) == 0 else (item.script_level * 2 if len(item.script_level) == 1 else item.script_level)
        sql = f"select * from ctop_ai_script_score " \
              f"where ('{item.search_word}' = '' or script_text like '%%{item.search_word}%%' ) " \
              f"and ('{item.user_name}' = '' or user_name like '%%{item.user_name}%%' ) " \
              f"and ('{item.script_level}' = '[]' or script_level in {tuple(script_level_lst)} ) " \
              f"and operate_type = 1 and end_time ='9999-12-31' " \
              f"and start_time >= '{item.start_date}' and start_time < '{end_date}' "
        org_df = pd.read_sql(sql, ai_word_engine)
        if not org_df.empty:
            org_df.sort_values(by='start_time', ascending=False, inplace=True)
            org_df['number'] = list(range(1, len(org_df) + 1))
            total_num = org_df.shape[0]
            detail = org_df.iloc[(item.page_num - 1) * item.page_size: item.page_num * item.page_size].to_dict('records')
            response.result = detail
            response.total_num = total_num
            response.page_num = item.page_num
            response.page_size = item.page_size
        else:
            response.message = "no data matched the query"
            response.page_num = item.page_num
            response.page_size = item.page_size
        logger.info(f"request body: {item}, response body: {response}")
    except Exception:
        response.code = -1
        response.message = traceback.format_exc()
        response.success = False
        logger.error(f"request body: {item}, response body: {response}")
    return response
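
# Pagination example: page_num=2, page_size=10 selects org_df.iloc[10:20],
# i.e. records 11-20 of the result sorted by start_time descending.
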
@logger.catch()
@router.post("/get_script_score",
             description="score the quality of a script",
             summary="score the quality of a script",
             response_model=GetSingleScriptScore)
def get_script_score(item: GetScriptScoreRequest):
    response = GetSingleScriptScore(message="script scoring finished")
    try:
        # Tokenize, encode and pad the script into a fixed-length index tensor.
        script_split_lst = split_by_jieba(item.script)
        feature = torch.tensor(pad_samples(encode_samples([[script_split_lst]])))
        # SentimentNet must be importable so torch.load can unpickle the full model.
        from script_score.lstm_network import SentimentNet
        net = torch.load('/data/pythonProject/video_to_word/script_score/pkl/epoch52_test_auc_0.790_train_auc_0.815.pth',
                         map_location='cpu')
        with torch.no_grad():
            score = net(feature)
        script_score, script_level, high_quality_prob = log_and_map_min_max_score(score[0][1].item())
        unique_id = str(uuid.uuid4())
        response.result = {"script_level": script_level,
                           "script_score": script_score,
                           "high_quality_prob": high_quality_prob,
                           "script_hash": hashlib.md5(item.script.encode('utf-8')).hexdigest(),
                           "script_text": item.script,
                           "unique_id": unique_id,
                           "user_id": item.user_id,
                           "user_name": item.user_name,
                           "operate_type": 1,
                           }
        # Persist the scoring result.
        script_info_df = pd.DataFrame([response.result])
        script_info_df.to_sql(name="ctop_ai_script_score",
                              con=ai_word_engine,
                              if_exists='append',
                              index=False)
        logger.info(f"request body: {item}, response body: {response}")
    except Exception:
        response.code = -1
        response.success = False
        response.message = traceback.format_exc()
        logger.error(f"request body: {item}, response body: {response}")
    return response
if __name__ == '__main__':
    text = "十六块二十六块,只要二十六块包邮到家,这么大一件派克服,现在不要两百,不要一百二十六块就给你包邮到家,真的太划算了,咱们工厂现在为了扩大销售渠道,所以特地拿出一批货在桃树上做活动,这款派克服寒气版型,特别时尚,抽绳收腰设计,修身显瘦,加绒内里还保暖毛领,精致又洋气,喜欢的朋友赶紧点击视频下方链接进入操作即可就可以购买啦。"
    # Local smoke test: the endpoint expects the request model, not a raw string
    # (user fields here are placeholders).
    print(get_script_score(GetScriptScoreRequest(script=text, user_name='test', user_id='test')))
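
# Example client call (a minimal sketch; host, port and route prefix depend on how
# this router is mounted in the FastAPI app):
#
#   import requests
#   resp = requests.post("http://localhost:8000/get_script_score",
#                        json={"script": "...", "user_name": "test", "user_id": "test"})
#   print(resp.json()["result"]["script_score"])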