liyuyi@c-top.com.cn 4 years ago
parent
commit
0f682a69b1

+ 30 - 26
config/config.yaml

@@ -1,41 +1,45 @@
-projectName:
-  - '淘特'
-  - '支付宝'
-
-
-projectInfoForExportScript:
-  -
-      project_id: (458)
-      project_name: '淘特'
-      channel:
-        - 1 # Huichuang Situo
-        - 0 # Ocean Engine
-  -
-      project_id: (67,123,42)
-      project_name: '支付宝'
-      channel:
-        - 1 # Huichuang Situo
-        - 0 # Ocean Engine
-
-
-# Production database jeecg-boot
-productDB:
+source_name_map:
+  1: '内部'
+  2: '头条巨量'
+  3: '快手开眼'
+
+
+# jeecg-boot production database
+jeecg_boot_product_db:
+  host: 139.186.27.96
+  username: data
+  password: hcst@2021
+  port: 3390
+  database: jeecg-boot
+
+# jeecg-boot test database
+jeecg_boot_dev_db:
   host: 139.186.27.96
   username: data
   password: hcst@2021
   port: 3390
   database: jeecg-boot
 
-# Development/test database db_ai_word
-devDB:
+# db_ai_word test database
+ai_word_dev_db:
   host: 139.186.165.84
   username: hcst
   password: hcst@2020
   port: 3306
   database: db_ai_word
 
+
+# db_ai_word production database
+ai_word_product_db:
+  host: 139.186.165.84
+  username: hcst
+  password: hcst@2020
+  port: 3306
+  database: db_ai_word
+
+
 # Local database mysql
-localDB:
+local_db:
   host: 192.168.1.193
   username: root
   password: root@123
@@ -43,4 +47,4 @@ localDB:
   database: mysql
 
 # Rows written to the database per batch
-chunkSize: 200
+chunk_size: 200
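
A block like `ai_word_dev_db` above is presumably what `common_func.get_db_engine` consumes. A minimal sketch of such a helper, assuming the same connection style as the deleted get_data.py below (the actual common_func implementation is not shown in this commit):

    from urllib import parse
    from sqlalchemy import create_engine

    def get_db_engine(db_config):
        # Build a SQLAlchemy engine from one *_db block of config.yaml.
        # quote_plus is needed because the passwords contain '@'.
        conn_str = "mysql+pymysql://%s:%s@%s:%d/%s" % (
            db_config['username'], parse.quote_plus(db_config['password']),
            db_config['host'], db_config['port'], db_config['database'])
        return create_engine(conn_str, connect_args={'charset': 'utf8'})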

+ 3 - 0
config/url.py

@@ -20,3 +20,6 @@ get_video_info_from_kuaishou_kaiyan = "https://cc.e.kuaishou.com/rest/creativeCe
 
 # Toutiao no-watermark link
 get_none_water_mark_url = "http://i.snssdk.com/video/urls/1/toutiao/mp4/"
+
+# Toutiao material permanent link
+toutiao_static_video_url = "http://i.snssdk.com/video/code/1/toutiao/"
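
The new constant is consumed in get_material_and_script_by_query_word.py later in this commit; roughly, per video signature:

    # Build the permanent link for a Toutiao material by appending its signature
    permanent_url = toutiao_static_video_url + signature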

+ 0 - 79
get_data.py

@@ -1,79 +0,0 @@
-from concurrent.futures import ThreadPoolExecutor
-import requests
-import datetime
-import json
-import pandas as pd
-import uuid
-import traceback
-from sqlalchemy import create_engine
-from urllib import parse
-from urllib.parse import urlencode
-
-if __name__ == '__main__':
-    online_db_con_str = "mysql+pymysql://%s:%s@%s:%d/%s" % ("readonly", parse.quote_plus("hcst@2021"), "139.186.27.96", 3390, "jeecg-boot")
-    online_engine = create_engine(online_db_con_str, connect_args={'charset': 'utf8'})
-
-    test_db_con_str = "mysql+pymysql://%s:%s@%s:%d/%s" % ("hcst", parse.quote_plus("hcst@2020"), "139.186.165.84", 3306, "db_ai_word")
-    test_engine = create_engine(test_db_con_str, connect_args={'charset': 'utf8'})
-
-    task_submit_url = 'http://139.186.165.84:31013/asr/task/submit'
-    task_result_url = 'http://139.186.165.84:31013/asr/task/result'
-
-    # 1. Fetch material info
-    # get_material_info_sql = """
-    # select t2.signature, t2.url
-    # from
-    # (select signature, account_id from ctop_kuaishou_report_daily_material
-    #        where account_id in (select account_id from ctop_user_allocation where project_id = 458)
-    #        group by signature
-    #        having sum(activation) <= 10 and sum(activation) >= 1
-    #        ) t1
-    # left join
-    # ctop_kuaishou_video_get t2
-    # on t1.signature = t2.signature and t1.account_id = t2.account_id
-    # where t2.signature is not null
-    # """
-    # material_info_df = pd.read_sql(get_material_info_sql, online_engine)
-    # material_info_df = material_info_df[(~material_info_df.signature.isnull()) & (~material_info_df.url.isnull())]
-    # N = 3000 if len(material_info_df) >= 3000 else len(material_info_df)
-    # material_info_df = material_info_df.sample(n=N, random_state=2077)
-
-    # material_info_df = pd.read_csv('merge_df_taote.csv')
-    # material_info_df = pd.read_csv('merge_df_zhifubao.csv')
-    # material_info_df = material_info_df[(~material_info_df.signature.isnull()) & (~material_info_df.url.isnull())]
-
-    # 2 Fetch tasks already submitted
-    # submit_task_sql = """select task_id, md5 signature from tb_asr_result """
-    # submit_task_df = pd.read_sql(submit_task_sql, test_engine)
-
-    # 3 Tasks that still need submitting
-    # to_submit_task_df = material_info_df[~material_info_df.signature.isin(submit_task_df.signature.values)]
-
-    # 4 Submit the tasks
-    # for index, row in to_submit_task_df.iterrows():
-    #     material_md5 = row['signature']
-    #     material_url = row['url']
-    #     print(material_md5, material_url)
-    #     request_data = {"md5": material_md5, "url": material_url}
-    #     # Form-style parameters, not a JSON body; the request style differs
-    #     request_full_path = task_submit_url + '?' + urlencode(request_data)
-    #     request = requests.post(request_full_path)
-    #     try:
-    #         result = json.loads(request.text)
-    #         print(result)
-    #     except:
-    #         print("error", request.text)
-
-    # 5. Fetch task results
-    get_result_sql = """select task_id from tb_asr_result where  word_text is null"""
-    get_result_df = pd.read_sql(get_result_sql, test_engine)
-    for index, row in get_result_df.iterrows():
-        task_id = row['task_id']
-        request_data = {'task_id': task_id}
-        request_full_path = task_result_url + '?' + urlencode(request_data)
-        request = requests.post(request_full_path)
-        try:
-            result = json.loads(request.text)
-            print(task_id, result['status'])
-        except:
-            print("error", task_id, request.text)

get_material_from_out_source/__init__.py → get_material_and_script/__init__.py


+ 20 - 17
get_material_from_out_source/get_material_from_kuaishou_kaiyan.py

@@ -1,9 +1,11 @@
 import datetime
 import json
 import traceback
+
 import pandas as pd
 import requests
-from common_func import mysql_replace_into, NpEncoder, get_db_engine, get_logger
+
+from common_func import NpEncoder, get_db_engine, get_logger
 from config.url import get_video_info_from_kuaishou_kaiyan
 
 
@@ -12,13 +14,13 @@ class GetMaterialFromKuaishouKaiyan(object):
         self.query_word = query_word  # the query word
         self.logger = logger
         self.db_engine = db_engine
+        self.video_df = pd.DataFrame()
 
     def get_video_basic_info(self):
         """
         Call Kuaishou Kaiyan with the query word to fetch the video list and basic video info such as duration, width, height, title, and url
         """
         # 1 Request the Kuaishou Kaiyan quick-creation API for basic video info
-        video_df = pd.DataFrame()
         page_num = 1
         page_size = 10
         total_size = 0
@@ -47,7 +49,7 @@ class GetMaterialFromKuaishouKaiyan(object):
                     video_page_df = pd.DataFrame(response_data.get('data').get('inspiredAds'))
                     # Extract the video playback url from the mainMvUrls field
                     video_page_df['video_url'] = video_page_df['mainMvUrls'].apply(lambda x: x[0]['url'])
-                    video_df = video_df.append(video_page_df)
+                    self.video_df = self.video_df.append(video_page_df)
 
                     total_size = response_data.get('data').get('totalSize')
                 if page_num * page_size > total_size:
@@ -65,22 +67,23 @@ class GetMaterialFromKuaishouKaiyan(object):
         # mainMvUrls: list to  str
         # coverThumbnailUrls:  list to str
         # headUrls:  list to str
-        video_df[['mainMvUrls', 'coverThumbnailUrls', 'headUrls', 'photoId']] = \
-            video_df[['mainMvUrls', 'coverThumbnailUrls', 'headUrls', 'photoId']].astype(str)
+        if not self.video_df.empty:
+            self.video_df[['mainMvUrls', 'coverThumbnailUrls', 'headUrls', 'photoId']] = \
+                self.video_df[['mainMvUrls', 'coverThumbnailUrls', 'headUrls', 'photoId']].astype(str)
 
-        # 2-2 Add query word and date
-        video_df['query_word'] = self.query_word
-        video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
+            # 2-2 Add query word and date
+            self.video_df['query_word'] = self.query_word
+            self.video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
 
-        # 2-3 Write to the database; photoId + query_word is the table's composite unique key, and duplicate keys are handled via replace_into
-        try:
-            video_df.to_sql(name="ctop_ai_material_info_from_kuaishou_kaiyan",
-                            con=self.db_engine,
-                            if_exists='append',
-                            index=False,
-                            chunksize=1000)
-        except:
-            self.logger.error(traceback.format_exc())
+            # 2-3 Write to the database; photoId + query_word is the table's composite unique key, and duplicate keys are handled via replace_into
+            try:
+                self.video_df.to_sql(name="ctop_ai_material_info_from_kuaishou_kaiyan",
+                                     con=self.db_engine,
+                                     if_exists='append',
+                                     index=False,
+                                     chunksize=1000)
+            except:
+                self.logger.error(traceback.format_exc())
 
 
 if __name__ == '__main__':
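
The comments above still refer to replace_into, although this commit drops mysql_replace_into from the import list and the to_sql call passes no method. For reference, a to_sql method callback with REPLACE INTO semantics could look roughly like this (a sketch, not the actual common_func implementation):

    def mysql_replace_into(table, conn, keys, data_iter):
        # pandas to_sql(method=...) callback: emit REPLACE INTO instead of
        # INSERT so rows hitting the composite unique key are overwritten.
        from sqlalchemy import text
        cols = ", ".join("`%s`" % k for k in keys)
        marks = ", ".join(":" + k for k in keys)
        stmt = text("REPLACE INTO `%s` (%s) VALUES (%s)" % (table.name, cols, marks))
        conn.execute(stmt, [dict(zip(keys, row)) for row in data_iter])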

+ 7 - 7
get_material_from_out_source/get_material_from_ocean_engine.py

@@ -15,6 +15,7 @@ class GetMaterialFromOceanEngine(object):
         self.logger = logger
         self.db_engine = db_engine
         self.signature_lst = None
+        self.video_df = pd.DataFrame()
 
     def get_material_basic_info(self):
         """
@@ -88,7 +89,6 @@ class GetMaterialFromOceanEngine(object):
         Call the Ocean Engine API with material signatures to fetch basic video info such as duration, width, height, and url
         """
         # 1 Request the Ocean Engine API for basic video info
-        video_df = pd.DataFrame()
         # Batched requests: number of videos per request (only 10 at a time, to improve data completeness)
         cnt_per_request = 10
         # Total number of videos
@@ -117,7 +117,7 @@ class GetMaterialFromOceanEngine(object):
                         single_dict = value
                         single_dict['signature'] = key
                         single_df = pd.DataFrame([single_dict])
-                        video_df = video_df.append(single_df)
+                        self.video_df = self.video_df.append(single_df)
             except:
                 self.logger.error("查询词:%s, 调用头条巨量引擎视频接口 分页获取 %s,出现异常信息:%s" %
                                   (self.query_word, query_ids, traceback.format_exc()))
@@ -127,15 +127,15 @@ class GetMaterialFromOceanEngine(object):
         # 2 Persist the fetched info to the database
         # 2-1 Normalize data types for storage
         # play_info: list to  str
-        video_df['play_info'] = video_df['play_info'].astype(str)
-        video_df.drop(labels='video_id', axis=1, inplace=True)
+        self.video_df['play_info'] = self.video_df['play_info'].astype(str)
+        self.video_df.drop(labels='video_id', axis=1, inplace=True)
 
         # 2-2 Add query word and date
-        video_df['query_word'] = self.query_word
-        video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
+        self.video_df['query_word'] = self.query_word
+        self.video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
 
         # 2-3 Write to the database; signature + query_word is the table's composite unique key, and duplicate keys are handled via replace_into
-        video_df.to_sql(name="ctop_ai_video_info_from_ocean_engine",
+        self.video_df.to_sql(name="ctop_ai_video_info_from_ocean_engine",
                         con=self.db_engine,
                         if_exists='append',
                         index=False,

+ 183 - 0
get_material_and_script_by_query_word.py

@@ -0,0 +1,183 @@
+import datetime
+from datetime import timedelta
+
+import pandas as pd
+import yaml
+
+from common_func import get_db_engine, get_logger
+from config.url import toutiao_static_video_url
+from get_material_and_script.get_material_from_kuaishou_kaiyan import GetMaterialFromKuaishouKaiyan
+from get_material_and_script.get_material_from_ocean_engine import GetMaterialFromOceanEngine
+from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
+
+if __name__ == '__main__':
+    # Create the logger
+    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_high_quality_material.log",
+                        log_name='get_high_quality_material_logger')
+    logger.info("get_high_quality_material_logger started! id of logger is: %s" % id(logger))
+
+    # 1 Read the config file
+    with open('/data/pythonProject/video-to-word/config/config.yaml', mode='r', encoding='utf-8') as f:
+        config = yaml.load(f.read(), Loader=yaml.FullLoader)
+
+    # 1-1 Database engines, switched between dev and production environments
+    # The read engine uses the production database; the write engine switches with the environment (test/production database)
+    # Note: this project currently uses the test database for both reads and writes
+    # TODO switch by environment once the database migration is done and the service is live
+    ai_word_engine = get_db_engine(config['ai_word_dev_db'])
+    jeecg_boot_engine = get_db_engine(config['jeecg_boot_product_db'])
+
+    # 1-2 Rows written to the database per batch
+    chunk_size = config['chunk_size']
+
+    # 1-3 Source codes & names
+    source_name = config['source_name_map']
+
+    # 2 Read the query-word table to get keywords and sources
+    sql = """
+    select  query_word,
+            query_time_range,
+            is_new,
+            source_code_lst
+     from ctop_ai_query_word where status = 1
+    """
+    df = pd.read_sql(sql, ai_word_engine)
+    for index, row in df.iterrows():
+        query_word = row['query_word']
+        query_time_range = row['query_time_range']
+        is_new_project_query_word = row['is_new']
+        source_code_lst = eval(row['source_code_lst'])
+
+        # 2-1 Fetch high-quality materials from internal and external sources
+        full_high_material_df = pd.DataFrame()
+        writer = pd.ExcelWriter('/data/pythonProject/video-to-word/data/%s_内外部优质素材_%s.xlsx' %
+                                (query_word, datetime.date.today().strftime('%Y-%m-%d')))
+        for source_code in source_code_lst:
+            if source_code == 1:
+                # Fetch in-house campaign materials
+                sql = """
+                select distinct(project_id) project_id from ctop_user_allocation where project_name  like '%%{query_word}%%'""".format(
+                    query_word=query_word)
+                project_id_lst = pd.read_sql(sql, jeecg_boot_engine).project_id.values
+                # Repeat a single id (list * 2 repeats, whereas the raw ndarray * 2 would double the value) so "in %s" renders as (id, id), not the invalid (id,)
+                project_id_lst = tuple(project_id_lst) if len(project_id_lst) > 1 else tuple(list(project_id_lst) * 2)
+                if project_id_lst:
+                    if is_new_project_query_word == 1:
+                        # Newly added query word: look back 3 months for internal high-quality materials
+                        start_date = (datetime.date.today() + timedelta(days=-30 * 3)).strftime('%Y-%m-%d')
+                        full_date_df = pd.DataFrame()
+                        # TODO use periods=3 for testing; otherwise periods=30 * 3
+                        for date in pd.date_range(start=start_date, freq='D', periods=30 * 3):
+                            stat_date = date.strftime('%Y-%m-%d')
+                            sql = """select signature, sum(activation) activation , sum(charge) charge,  stat_date
+                                                    from ctop_kuaishou_report_daily_material
+                                                    where account_id in (select account_id from ctop_user_allocation where project_id in %s)
+                                                    and stat_date = '%s'
+                                                    group by signature,stat_date""" % (project_id_lst, stat_date)
+                            one_date_df = pd.read_sql(sql, jeecg_boot_engine)
+                            full_date_df = full_date_df.append(one_date_df)
+                        # Filter high-quality materials (>=100 cumulative activations within 3 months)
+                        g = full_date_df.groupby('signature').agg({'activation': sum, 'charge': sum})
+                        g.reset_index(drop=False, inplace=True)
+                        high_material_df = g[g['activation'] >= 100]
+                        logger.info("新增查询词:%s, 内部高质量素材个数为 %s!" % (query_word, len(high_material_df)))
+                    else:
+                        # Existing query word: look back 7 days for internal high-quality materials
+                        start_date = (datetime.date.today() + timedelta(days=-7)).strftime('%Y-%m-%d')
+                        full_date_df = pd.DataFrame()
+                        for date in pd.date_range(start=start_date, freq='D', periods=7):
+                            stat_date = date.strftime('%Y-%m-%d')
+                            sql = """select signature, sum(activation) activation , sum(charge) charge,  stat_date
+                                                    from ctop_kuaishou_report_daily_material
+                                                    where account_id in (select account_id from ctop_user_allocation where project_id in %s)
+                                                    and stat_date = '%s'
+                                                    group by signature,stat_date""" % (project_id_lst, stat_date)
+                            one_date_df = pd.read_sql(sql, jeecg_boot_engine)
+                            full_date_df = full_date_df.append(one_date_df)
+
+                        # Filter high-quality materials (>=50 cumulative activations in the past week)
+                        g = full_date_df.groupby('signature').agg({'activation': sum, 'charge': sum})
+                        g.reset_index(drop=False, inplace=True)
+                        high_material_df = g[g['activation'] >= 50]
+                        logger.info("历史查询词:%s, 内部高质量素材个数为 %s!" % (query_word, len(high_material_df)))
+                else:
+                    high_material_df = pd.DataFrame()  # avoid referencing an unbound or stale frame below
+                    logger.info("Query word: %s, no matching internal project!" % query_word)
+
+                if not high_material_df.empty:
+                    # Fetch the material urls
+                    sql = """select url video_url, signature from ctop_kuaishou_video_get 
+                             where account_id in (select account_id from ctop_user_allocation where project_id in %s)
+                             and signature in %s
+                             group by signature
+                                    """ % (project_id_lst, tuple(high_material_df.signature.values),)
+                    url_df = pd.read_sql(sql, jeecg_boot_engine)
+                    high_material_df = high_material_df.merge(url_df, on='signature', how='inner')
+                    high_material_df.loc[:, 'source_code'] = source_code  # used later for the excel sheet_name
+
+                    # Append this source's high-quality materials to full_high_material_df for the combined script fetch and excel export
+                    full_high_material_df = full_high_material_df.append(high_material_df)
+
+            if source_code == 3:
+                # Fetch high-quality materials from Kuaishou Kaiyan quick-creation
+                inst = GetMaterialFromKuaishouKaiyan(query_word=query_word, logger=logger, db_engine=ai_word_engine)
+                inst.get_video_basic_info()
+                if not inst.video_df.empty:
+                    high_material_df = inst.video_df[['photoId', 'video_url']]
+                    high_material_df.rename(columns={'photoId': 'signature'}, inplace=True)
+                    high_material_df.loc[:, 'source_code'] = source_code
+
+                    full_high_material_df = full_high_material_df.append(high_material_df)  # append the renamed signature/video_url frame, consistent with the other sources
+                else:
+                    # TODO add a log message
+                    pass
+
+            if source_code == 2:
+                # Fetch high-quality materials from Toutiao Ocean Engine; uses the query_time_range parameter
+                inst = GetMaterialFromOceanEngine(query_word=query_word,
+                                                  period_type=query_time_range,
+                                                  logger=logger,
+                                                  db_engine=ai_word_engine)
+                inst.get_material_basic_info()
+                inst.get_video_basic_info()
+                if not inst.video_df.empty:
+                    high_material_df = inst.video_df[['signature', 'video_url']]
+                    high_material_df.loc[:, 'source_code'] = source_code
+
+                    # Append this source's high-quality materials to full_high_material_df for the combined script fetch and excel export
+                    full_high_material_df = full_high_material_df.append(high_material_df)
+                else:
+                    # TODO add a log message
+                    pass
+
+        # 2-2 Call the Tencent Cloud speech-to-script service
+        full_high_material_df = full_high_material_df[~full_high_material_df.signature.isnull()]
+        get_script_ins = GetScriptFromTengXunYunServer(logger=logger,
+                                                       db_engine=ai_word_engine,
+                                                       task_df=full_high_material_df[['signature', 'video_url']],
+                                                       task_ids=None)
+        get_script_ins.submit_task()
+        if get_script_ins.task_ids:
+            get_script_ins.get_result()
+
+        # 2-3 Fetch the scripts from the database and export an excel file
+        sql = """select md5 signature,task_result from tb_asr_result where word_text is not null and md5 in %s""" \
+              % (tuple(full_high_material_df.signature.values),)
+        script_df = pd.read_sql(sql, ai_word_engine)
+        script_df.drop_duplicates('signature', keep='first', inplace=True)
+        # task_result is a stringified dict; its Data.Result appears to be "[start,end] text", so keep the text after the first ']'
+        script_df['script'] = script_df['task_result'].apply(lambda x: eval(x)['Data']['Result'].split(']')[1].strip())
+
+        charge_script_df = script_df.merge(full_high_material_df, on='signature', how='inner')
+        charge_script_df.rename(columns={'charge': '消耗', 'script': '脚本'}, inplace=True)
+
+        # Toutiao video links must be replaced with the permanent link
+        charge_script_df['video_url'] = charge_script_df.apply(
+            lambda item: toutiao_static_video_url + item['signature'] if item.get('source_code') == 2 else item['video_url'], axis=1)
+
+        for source_code in charge_script_df.source_code.unique():
+            one_source_df = charge_script_df[charge_script_df.source_code == source_code]
+            one_source_df[['脚本', 'video_url', '消耗']].to_excel(writer,
+                                                               sheet_name='%s_%s_优质素材' % (query_word, source_name[source_code]),
+                                                               index=False,
+                                                               header=True)
+        writer.save()
+        logger.info("查询词: %s,文件导出完成!" % query_word)

+ 0 - 50
get_material_from_out_source/get_material_and_script_from_out_source_by_query_word.py

@@ -1,50 +0,0 @@
-import pandas as pd
-import yaml
-# from get_material_from_kuaishou_kaiyan
-from common_func import get_db_engine, get_logger
-
-if __name__ == '__main__':
-    # Create the logger
-    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_material_from_outer_source.log",
-                        log_name='get_material_from_outer_source_logger')
-    logger.info("get_material_from_outer_source_logger started! id of logger is: %s" % id(logger))
-
-    # 1 Read the config file
-    with open('/data/pythonProject/video-to-word/config/config.yaml', mode='r', encoding='utf-8') as f:
-        config = yaml.load(f.read(), Loader=yaml.FullLoader)
-
-    # 1-1 Database engines, switched between dev and production environments
-    # The read engine uses the production database; the write engine switches with the environment (test/production database)
-    # Note: this project currently uses the test database for both reads and writes
-    write_engine = get_db_engine(config['devDB'])
-    read_engine = get_db_engine(config['devDB'])
-
-    # 1-2 Rows written to the database per batch
-    chunk_size = config['chunkSize']
-
-    # 2 Read the query-word table to get keywords and sources
-    sql = """
-    select  query_word,
-            query_word_type,
-            query_time_range,
-            is_new_project_query_word,
-            source_code_lst
-     from ctop_ai_query_word_for_outer_source where status = 1
-    """
-    df = pd.read_sql(sql, read_engine)
-    for index, row in df.iterrows():
-        query_word = row['row']
-        query_word_type = row['query_word_type']
-        query_time_range = row['query_time_range']
-        is_new_project_query_word = row['is_new_project_query_word']
-        source_code_lst = row['source_code_lst']
-        for source_code in source_code_lst:
-            if source_code == -1:
-                # Fetch in-house campaign materials
-                pass
-            if source_code == 1:
-                # Fetch Kuaishou Kaiyan quick-creation materials
-                pass
-            if source_code == 2:
-                # Fetch Toutiao Ocean Engine materials; uses the query_time_range parameter
-                pass

+ 1 - 1
get_script_from_tengxunyun.py

@@ -11,7 +11,7 @@ class GetScriptFromTengXunYunServer(object):
         self.logger = logger
         self.db_engine = db_engine
         self.task_ids = task_ids
-        self.task_df = task_df
+        self.task_df = task_df[~task_df.signature.isnull()]
 
     def submit_task(self):
         """

+ 0 - 177
get_video_from_ocean_engine.py

@@ -1,177 +0,0 @@
-import datetime
-import json
-import traceback
-from urllib.parse import urlencode
-import pandas as pd
-import requests
-import yaml
-from common_func import get_db_engine, mysql_replace_into, NpEncoder, get_logger
-from config.url import get_material_info_from_ocean_engine_url, get_video_info_from_ocean_engine_url
-from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
-
-
-def get_material_info(project_name, period_type):
-    material_df = pd.DataFrame()  # initialize the returned result
-    has_more = True  # whether more paged data exists; initialized to True
-    limit = 10  # 10 items per page
-    page = 1  # page number
-    while has_more:
-        request_data = {'list_type': 1,
-                        'material_type': 3,
-                        'order_by': 'click_show_rate',
-                        'period_type': period_type,
-                        'aggr_app_code': 4,
-                        'aggr_category_list': '[]',
-                        'video_type': '[]',
-                        'keywords': project_name,
-                        'landing_type': '[]',
-                        'limit': limit,
-                        'page': page,
-                        'video_duration_type': 5}
-
-        try:
-            request_path = get_material_info_from_ocean_engine_url + '?' + urlencode(request_data)
-            request = requests.get(request_path)
-            result = json.loads(request.text)
-            material_page_df = pd.DataFrame(result['data']['materials'])
-            material_df = material_df.append(material_page_df)
-            if result.get('code') == 0 and result.get('data').get('has_more') is True:
-                page += 1
-                logger.info("project_name:%s, get_material_info_from_ocean_engine 分页获取第 %s 页" % (project_name, page))
-            else:
-                has_more = False
-        except:
-            logger.error("project_name:%s, get_material_info_from_ocean_engine 分页获取时发生异常信息: %s" %
-                         (project_name, traceback.format_exc()))
-
-    logger.info("project_name:%s, get_material_info_from_ocean_engine 分页获取获取完成,共 %s 个物料信息!" %
-                (project_name, len(material_df)))
-
-    # Normalize data types for storage
-    # metrics dict to  str
-    # title list to str
-    # video_type list to str
-    # watermarks list to str
-    material_df[['metrics', 'title', 'video_type', 'watermarks']] = \
-        material_df[['metrics', 'title', 'video_type', 'watermarks']].astype(str)
-    material_df.rename(columns={'vid': 'signature'}, inplace=True)
-
-    # Add project name and date
-    material_df['project_name'] = project_name
-    material_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
-
-    # Write to the database
-    material_df.to_sql(name="ctop_ai_material_info_from_ocean_engine",
-                       con=write_engine,
-                       if_exists='append',
-                       index=False,
-                       chunksize=chunk_size,
-                       method=mysql_replace_into)
-
-    return material_df
-
-
-def get_video_info(vid, project_name):
-    """
-    Only request 10 items at a time, to improve data completeness
-    :param vid:
-    :param project_name:
-    :return:
-    """
-    video_df = pd.DataFrame()
-    # Videos per request
-    cnt_per_request = 10
-    # Total number of videos
-    total_cnt = len(vid)
-    logger.info("project_name:%s, get_video_info_from_ocean_engine,共 %s 个视频需要请求接口获取video_url" % (project_name, total_cnt))
-    for i in range(0, total_cnt, cnt_per_request):
-        if i + cnt_per_request < total_cnt:
-            query_ids = vid[i: i + cnt_per_request]
-            logger.info("project_name:%s, get_video_info_from_ocean_engine 分页获取第 %s 个 到 %s 个视频信息" %
-                        (project_name, i, i + cnt_per_request))
-        else:
-            query_ids = vid[i:]
-            logger.info("project_name:%s, get_video_info_from_ocean_engine 分页获取第 %s 个 到 %s 个视频信息" %
-                        (project_name, i, total_cnt))
-
-        try:
-            request_data = {"query_ids": query_ids, "water_mark": "creative_center"}
-            request = requests.post(url=get_video_info_from_ocean_engine_url,
-                                    headers={'Content-Type': 'application/json'},
-                                    data=json.dumps(request_data, cls=NpEncoder)
-                                    )
-            response_data = json.loads(request.text)
-
-            if response_data.get('code') == 0 and response_data.get('data'):
-                for key, value in response_data['data'].items():
-                    single_dict = value
-                    single_dict['signature'] = key
-                    single_df = pd.DataFrame([single_dict])
-                    video_df = video_df.append(single_df)
-        except:
-            logger.error("project_name:%s, get_video_info_from_ocean_engine 分页获取 %s,出现异常信息:%s" %
-                         (project_name, query_ids, traceback.format_exc()))
-
-    # Normalize data types for storage
-    # play_info list to  str
-    video_df['play_info'] = video_df['play_info'].astype(str)
-    video_df.drop(labels='video_id', axis=1, inplace=True)
-
-    # Add project name and date
-    video_df['project_name'] = project_name
-    video_df['stat_date'] = datetime.datetime.today().strftime('%Y-%m-%d')
-
-    # Write to the database
-    video_df.to_sql(name="ctop_ai_video_info_from_ocean_engine",
-                    con=write_engine,
-                    if_exists='append',
-                    index=False,
-                    chunksize=chunk_size,
-                    method=mysql_replace_into)
-
-    return video_df
-
-
-if __name__ == '__main__':
-    # Create the logger
-    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_video_from_ocean_engine.log",
-                        log_name='get_video_from_ocean_engine_logger')
-    logger.info("get_video_from_ocean_engine started! id of logger is: %s" % id(logger))
-
-    # 1 Read the config file
-    with open('/data/pythonProject/video-to-word/config/config.yaml', mode='r', encoding='utf-8') as f:
-        config = yaml.load(f.read(), Loader=yaml.FullLoader)
-
-    # 1-1 Database engines, switched between dev and production environments
-    # The read engine uses the production database; the write engine switches with the environment (test/production database)
-    # Note: this project currently uses the test database for both reads and writes
-    write_engine = get_db_engine(config['devDB'])
-    read_engine = get_db_engine(config['devDB'])
-
-    # 1-2 Rows written to the database per batch
-    chunk_size = config['chunkSize']
-
-    # 1-3 Read the project list
-    project_name_lst = config['projectName']
-
-    # 2 Fetch Ocean Engine data per project
-    for project in project_name_lst:
-        try:
-            logger.info("****************************** %s 项目开始执行 ******************************" % project)
-            # 2-1 Fetch the material list and write it to the database
-            material_info_df = get_material_info(project, 7)
-
-            # 2-2 Fetch urls by signature and write them to the database
-            vid_lst = material_info_df['signature'].values
-            video_info_df = get_video_info(vid_lst, project)
-
-            # 2-3 Submit speech-to-script tasks to Tencent Cloud
-            task_df = video_info_df[['signature', 'video_url']]
-            get_script_ins = GetScriptFromTengXunYunServer(logger, read_engine, task_df, task_ids=None)
-            get_script_ins.submit_task()
-            if get_script_ins.task_ids:
-                get_script_ins.get_result()
-
-            logger.info("****************************** %s 项目执行完成 ******************************" % project)
-        except:
-            logger.error("project_name: %s, 发生异常信息: %s" % (project, traceback.format_exc()))