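"""Fetch top-performing creative materials and their video urls from the
Ocean Engine creative center, persist both to MySQL, and submit the videos
to Tencent Cloud for speech-to-script transcription.

Per project the pipeline is:
    1. get_material_info: page through the material ranking API and store
       the rows in ctop_ai_material_info_from_ocean_engine.
    2. get_video_info: resolve video urls for the material signatures and
       store the rows in ctop_ai_video_info_from_ocean_engine.
    3. GetScriptFromTengXunYunServer: submit the urls for transcription and
       collect the resulting scripts.
"""
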
import datetime
import json
import os
import traceback
from urllib.parse import urlencode

import pandas as pd
import requests
import yaml

from common_func import get_db_engine, mysql_replace_into, NpEncoder, get_logger
from config.url import get_material_info_from_ocean_engine_url, get_video_info_from_ocean_engine_url
from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 
def get_material_info(project_name, period_type):
    """Page through the Ocean Engine material ranking for a project, write
    the materials to the database and return them.

    :param project_name: keyword used to filter the materials
    :param period_type: time window of the ranking (the caller passes 7)
    :return: DataFrame with one row per material
    """
    material_df = pd.DataFrame()  # accumulated result
    has_more = True  # whether another page of data exists; initialised to True
    limit = 10  # rows fetched per page
    page = 1  # current page number
    while has_more:
        request_data = {'list_type': 1,
                        'material_type': 3,
                        'order_by': 'click_show_rate',
                        'period_type': period_type,
                        'aggr_app_code': 4,
                        'aggr_category_list': '[]',
                        'video_type': '[]',
                        'keywords': project_name,
                        'landing_type': '[]',
                        'limit': limit,
                        'page': page,
                        'video_duration_type': 5}
        try:
            request_path = get_material_info_from_ocean_engine_url + '?' + urlencode(request_data)
            response = requests.get(request_path)
            result = response.json()
            material_page_df = pd.DataFrame(result['data']['materials'])
            # DataFrame.append was removed in pandas 2.0; concatenate instead
            material_df = pd.concat([material_df, material_page_df], ignore_index=True)
            if result.get('code') == 0 and result['data'].get('has_more') is True:
                page += 1
                logger.info("project_name:%s, get_material_info_from_ocean_engine fetching page %s" % (project_name, page))
            else:
                has_more = False
        except Exception:
            logger.error("project_name:%s, get_material_info_from_ocean_engine exception while paging: %s" %
                         (project_name, traceback.format_exc()))
            # retrying the same page would otherwise loop forever, so stop paging
            break
    logger.info("project_name:%s, get_material_info_from_ocean_engine pagination done, %s materials fetched!" %
                (project_name, len(material_df)))

    # Normalise column types so the rows can be written to MySQL:
    # metrics (dict), title (list), video_type (list) and watermarks (list)
    # are all serialised to strings.
    material_df[['metrics', 'title', 'video_type', 'watermarks']] = \
        material_df[['metrics', 'title', 'video_type', 'watermarks']].astype(str)
    material_df.rename(columns={'vid': 'signature'}, inplace=True)

    # Attach the project name and the snapshot date
    material_df['project_name'] = project_name
    material_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')

    # Write to the database
    material_df.to_sql(name="ctop_ai_material_info_from_ocean_engine",
                       con=write_engine,
                       if_exists='append',
                       index=False,
                       chunksize=chunk_size,
                       method=mysql_replace_into)
    return material_df
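
# Note on `method=mysql_replace_into` above: pandas invokes such a hook with
# (table, conn, keys, data_iter) instead of issuing plain INSERT statements.
# The real implementation lives in common_func; a minimal sketch of how such
# a hook can upsert on the primary key via MySQL's REPLACE INTO (an assumption
# about what common_func does, not its actual code):
#
#     from sqlalchemy import text
#
#     def mysql_replace_into(table, conn, keys, data_iter):
#         columns = ", ".join("`%s`" % k for k in keys)
#         placeholders = ", ".join(":%s" % k for k in keys)
#         stmt = text("REPLACE INTO `%s` (%s) VALUES (%s)"
#                     % (table.name, columns, placeholders))
#         conn.execute(stmt, [dict(zip(keys, row)) for row in data_iter])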
 
def get_video_info(vid, project_name):
    """Resolve video urls for the given material signatures.

    Only 10 ids are requested per call to improve the completeness of the
    returned data.

    :param vid: sequence of material signatures (video ids)
    :param project_name: project the signatures belong to
    :return: DataFrame with one row per video
    """
    video_df = pd.DataFrame()
    # number of videos per request
    cnt_per_request = 10
    # total number of videos
    total_cnt = len(vid)
    logger.info("project_name:%s, get_video_info_from_ocean_engine, %s videos need a video_url lookup" % (project_name, total_cnt))
    for i in range(0, total_cnt, cnt_per_request):
        # slicing clamps at the end of the sequence, so no bounds check is needed
        query_ids = vid[i: i + cnt_per_request]
        logger.info("project_name:%s, get_video_info_from_ocean_engine fetching videos %s to %s" %
                    (project_name, i, min(i + cnt_per_request, total_cnt)))
        try:
            request_data = {"query_ids": query_ids, "water_mark": "creative_center"}
            response = requests.post(url=get_video_info_from_ocean_engine_url,
                                     headers={'Content-Type': 'application/json'},
                                     data=json.dumps(request_data, cls=NpEncoder))
            response_data = response.json()
            if response_data.get('code') == 0 and response_data.get('data'):
                for key, value in response_data['data'].items():
                    single_dict = value
                    single_dict['signature'] = key
                    single_df = pd.DataFrame([single_dict])
                    # DataFrame.append was removed in pandas 2.0; concatenate instead
                    video_df = pd.concat([video_df, single_df], ignore_index=True)
        except Exception:
            logger.error("project_name:%s, get_video_info_from_ocean_engine exception for batch %s: %s" %
                         (project_name, query_ids, traceback.format_exc()))

    # Normalise column types so the rows can be written to MySQL:
    # play_info (list) is serialised to a string.
    video_df['play_info'] = video_df['play_info'].astype(str)
    video_df.drop(labels='video_id', axis=1, inplace=True)

    # Attach the project name and the snapshot date
    video_df['project_name'] = project_name
    video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')

    # Write to the database
    video_df.to_sql(name="ctop_ai_video_info_from_ocean_engine",
                    con=write_engine,
                    if_exists='append',
                    index=False,
                    chunksize=chunk_size,
                    method=mysql_replace_into)
    return video_df
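
# Note on `cls=NpEncoder` above: query_ids are sliced from a numpy array
# (material_info_df['signature'].values), and the standard json module cannot
# serialise numpy scalars or arrays. The encoder is imported from common_func;
# a typical implementation (an assumption, not the actual common_func code)
# subclasses json.JSONEncoder:
#
#     import numpy as np
#
#     class NpEncoder(json.JSONEncoder):
#         def default(self, obj):
#             if isinstance(obj, np.integer):
#                 return int(obj)
#             if isinstance(obj, np.floating):
#                 return float(obj)
#             if isinstance(obj, np.ndarray):
#                 return obj.tolist()
#             return super().default(obj)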
 
if __name__ == '__main__':
    # Create the logger
    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_video_from_ocean_engine.log",
                        log_name='get_video_from_ocean_engine_logger')
    logger.info("get_video_from_ocean_engine started! id of logger is: %s" % id(logger))

    # 1 Read the configuration file
    with open('/data/pythonProject/video-to-word/config/config.yaml', mode='r', encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)

    # 1-1 Database engines. The read engine normally points at production
    # while the write engine switches between the test and production
    # databases with the environment. Note: this project currently reads AND
    # writes the test (dev) database.
    write_engine = get_db_engine(config['devDB'])
    read_engine = get_db_engine(config['devDB'])

    # 1-2 Number of rows per chunk when writing to the database
    chunk_size = config['chunkSize']

    # 1-3 List of projects to process
    project_name_lst = config['projectName']
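
    # The config file is expected to provide at least the keys used above.
    # Illustrative structure only (placeholder values, not the real deployment
    # settings; the exact shape of devDB depends on common_func.get_db_engine):
    #
    #     devDB:
    #       host: 127.0.0.1
    #       port: 3306
    #       user: ai_user
    #       password: "***"
    #       database: ctop_ai
    #     chunkSize: 1000
    #     projectName:
    #       - project_a
    #       - project_b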
 
    # 2 Fetch the Ocean Engine data project by project
    for project in project_name_lst:
        try:
            logger.info("****************************** project %s started ******************************" % project)
            # 2-1 Fetch the material list and write it to the database
            material_info_df = get_material_info(project, 7)
            # 2-2 Resolve the video urls by signature and write them to the database
            vid_lst = material_info_df['signature'].values
            video_info_df = get_video_info(vid_lst, project)
            # 2-3 Submit the speech-to-script tasks to Tencent Cloud
            task_df = video_info_df[['signature', 'video_url']]
            get_script_ins = GetScriptFromTengXunYunServer(logger, read_engine, task_df, task_ids=None)
            get_script_ins.submit_task()
            if get_script_ins.task_ids:
                get_script_ins.get_result()
            logger.info("****************************** project %s finished ******************************" % project)
        except Exception:
            logger.error("project_name: %s, exception: %s" % (project, traceback.format_exc()))
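
    # GetScriptFromTengXunYunServer interface as used above (inferred from
    # this call site; the class itself lives in get_script_from_tengxunyun.py):
    #     __init__(logger, engine, task_df, task_ids=None)
    #         task_df must carry the 'signature' and 'video_url' columns.
    #     submit_task()
    #         submits the video urls for transcription and populates task_ids.
    #     get_result()
    #         polls Tencent Cloud for the transcripts of the submitted tasks.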
 
 