@@ -0,0 +1,177 @@
+import pandas as pd
+from config.url import get_material_info_from_ocean_engine_url, get_video_info_from_ocean_engine_url
+from urllib.parse import urlencode
+import requests
+import json
+import traceback
+import datetime
+from common_func import mysql_replace_into, NpEncoder, get_db_engine, get_logger
+
+
+class GetMaterialFromOceanEngine(object):
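+    """Fetch material and video metadata from the Toutiao Ocean Engine creative
+    center for a single query word and persist the results to MySQL."""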
+    def __init__(self, query_word, period_type, logger, db_engine):
+        self.query_word = query_word  # query word (keyword)
+        self.period_type = period_type  # query period: last 3 days, last 7 days, etc.
+        self.logger = logger
+        self.db_engine = db_engine
+        self.signature_lst = None
+
+    def get_material_basic_info(self):
+        """
+        Call the Ocean Engine API with the query word to get the material list and basic material information, such as each material's best title, industry label, video_id, etc.
+        """
+        # 1 Request the Ocean Engine API for basic material information
+        self.logger.info("Query word: %s, calling the Toutiao Ocean Engine material API, execution started -------------------- " % self.query_word)
+        material_df = pd.DataFrame()  # initialize the returned result
+        has_more = True  # whether more paginated data exists, initialized to True
+        limit = 10  # fetch 10 records per page
+        page = 1  # current page number
+        while has_more:
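+            # The material endpoint is paginated: keep requesting with an increasing
+            # `page` until the response no longer reports has_more (or a request fails).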
+            request_data = {'list_type': 1,
+                            'material_type': 3,
+                            'order_by': 'click_show_rate',
+                            'period_type': self.period_type,
+                            'aggr_app_code': 4,
+                            'aggr_category_list': '[]',
+                            'video_type': '[]',
+                            'keywords': self.query_word,
+                            'landing_type': '[]',
+                            'limit': limit,
+                            'page': page,
+                            'video_duration_type': 5}
+
+            try:
+                request_path = get_material_info_from_ocean_engine_url + '?' + urlencode(request_data)
+                request = requests.get(request_path)
+                result = json.loads(request.text)
+                material_page_df = pd.DataFrame(result['data']['materials'])
+                material_df = material_df.append(material_page_df)
+                if result.get('code') == 0 and result.get('data').get('has_more') is True:
+                    page += 1
+                    self.logger.info("Query word: %s, calling the Toutiao Ocean Engine material API, fetching page %s" % (self.query_word, page))
+                else:
+                    has_more = False
+            except:
+                self.logger.error("Query word: %s, exception occurred while paging through the Toutiao Ocean Engine material API: %s" %
+                                  (self.query_word, traceback.format_exc()))
+                has_more = False  # stop paging on a failed request to avoid an infinite loop
+
+        self.logger.info("Query word: %s, finished calling the Toutiao Ocean Engine material API, %s materials in total!" %
+                         (self.query_word, len(material_df)))
+
+        # 2 Store the retrieved information in the database
+        # 2-1 Convert field types so the data can be written to the database
+        # metrics: dict to str
+        # title: list to str
+        # video_type: list to str
+        # watermarks: list to str
+        material_df[['metrics', 'title', 'video_type', 'watermarks']] = \
+            material_df[['metrics', 'title', 'video_type', 'watermarks']].astype(str)
+        material_df.rename(columns={'vid': 'signature'}, inplace=True)
+
+        # 2-2 Add the query word and date
+        material_df['query_word'] = self.query_word
+        material_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
+
+        # 2-3 Write to the database; the table uses signature + query_word as a composite unique key, and duplicate keys are handled with replace-into
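+        # NOTE: mysql_replace_into (imported from common_func) is assumed to be a
+        # pandas to_sql `method` callable that issues MySQL REPLACE INTO statements,
+        # so rows colliding on the signature + query_word unique key are overwritten.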
+        material_df.to_sql(name="ctop_ai_material_info_from_ocean_engine",
+                           con=self.db_engine,
+                           if_exists='append',
+                           index=False,
+                           chunksize=1000,
+                           method=mysql_replace_into)
+
+        # 3 Collect the video ids (signatures) of the materials
+        self.signature_lst = list(material_df['signature'].values)
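+        # signature_lst is consumed later by get_video_basic_info() to pull per-video details.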
+
+    def get_video_basic_info(self):
+        """
+        Call the Ocean Engine API with the material signatures to get basic video information, such as duration, width, height, url, etc.
+        """
+        # 1 Request the Ocean Engine API for basic video information
+        video_df = pd.DataFrame()
+        # Request in batches: number of videos per request (only 10 per request, to improve the completeness of the returned data)
+        cnt_per_request = 10
+        # total number of videos
+        total_cnt = len(self.signature_lst)
+        self.logger.info("Query word: %s, calling the Toutiao Ocean Engine video API for %s videos, execution started ----------------------" % (self.query_word, total_cnt))
+        for i in range(0, total_cnt, cnt_per_request):
+            if i + cnt_per_request < total_cnt:
+                query_ids = self.signature_lst[i: i + cnt_per_request]
+                self.logger.info("Query word: %s, calling the Toutiao Ocean Engine video API, fetching videos %s to %s" %
+                                 (self.query_word, i, i + cnt_per_request))
+            else:
+                query_ids = self.signature_lst[i:]
+                self.logger.info("Query word: %s, calling the Toutiao Ocean Engine video API, fetching videos %s to %s" %
+                                 (self.query_word, i, total_cnt))
+
+            try:
+                request_data = {"query_ids": query_ids, "water_mark": "creative_center"}
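+                # NpEncoder (from common_func) is presumably a json.JSONEncoder subclass that
+                # converts numpy types to plain Python types so the id list can be serialized below.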
+                request = requests.post(url=get_video_info_from_ocean_engine_url,
+                                        headers={'Content-Type': 'application/json'},
+                                        data=json.dumps(request_data, cls=NpEncoder)
+                                        )
+                response_data = json.loads(request.text)
+
+                if response_data.get('code') == 0 and response_data.get('data'):
+                    for key, value in response_data['data'].items():
+                        single_dict = value
+                        single_dict['signature'] = key
+                        single_df = pd.DataFrame([single_dict])
+                        video_df = video_df.append(single_df)
+            except:
+                self.logger.error("Query word: %s, exception occurred while fetching batch %s from the Toutiao Ocean Engine video API: %s" %
+                                  (self.query_word, query_ids, traceback.format_exc()))
+
+        self.logger.info("Query word: %s, finished calling the Toutiao Ocean Engine video API for %s videos ----------------------" % (self.query_word, total_cnt))
+
+        # 2 Store the retrieved information in the database
+        # 2-1 Convert data types so the data can be written to the database
+        # play_info: list to str
+        video_df['play_info'] = video_df['play_info'].astype(str)
+        video_df.drop(labels='video_id', axis=1, inplace=True)
+
+        # 2-2 Add the query word and date
+        video_df['query_word'] = self.query_word
+        video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
+
+        # 2-3 Write to the database; the table uses signature + query_word as a composite unique key, and duplicate keys are handled with replace-into
+        video_df.to_sql(name="ctop_ai_video_info_from_ocean_engine",
+                        con=self.db_engine,
+                        if_exists='append',
+                        index=False,
+                        chunksize=1000,
+                        method=mysql_replace_into)
+
+
+if __name__ == '__main__':
+    db_info = {'username': 'hcst',
+               'password': 'hcst@2020',
+               'host': '139.186.165.84',
+               'port': 3306,
+               'database': 'db_ai_word'}
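+    # get_db_engine is assumed to build a SQLAlchemy engine from the settings above.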
+    db_engine = get_db_engine(db_info)
+
+    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_video_from_ocean_engine.log",
+                        log_name='get_video_from_ocean_engine_logger')
+    logger.info("get_video_from_ocean_engine started! id of logger is: %s" % id(logger))
+
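+    # Fetch the material list for the keyword first; video details are only queried
+    # when at least one signature was returned. period_type=7 presumably selects the
+    # "last 7 days" window (see the comment in __init__).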
+    inst = GetMaterialFromOceanEngine(query_word='淘特', period_type=7, logger=logger, db_engine=db_engine)
+    inst.get_material_basic_info()
+    if inst.signature_lst:
+        inst.get_video_basic_info()
+