liyuyi@c-top.com.cn 4 years ago
Parent
Current commit
e121efe736

+ 14 - 1
common_func.py

@@ -2,6 +2,8 @@ from sqlalchemy import create_engine
 from urllib import parse
 import json
 import numpy as np
+import logging
+from concurrent_log import ConcurrentTimedRotatingFileHandler
 
 
 # Create the database connection engine
@@ -42,4 +44,15 @@ class NpEncoder(json.JSONEncoder):
         elif isinstance(obj, np.ndarray):
             return obj.tolist()
         else:
-            return super(NpEncoder, self).default(obj)
+            return super(NpEncoder, self).default(obj)
+
+
+# Build and return a logger object
+def get_logger(log_file_name, log_name):
+    log_formatter = logging.Formatter('%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s', '%Y/%m/%d %I:%M:%S %p')
+    log_handler = ConcurrentTimedRotatingFileHandler(filename=log_file_name, when="midnight", backupCount=100)
+    log_handler.setFormatter(log_formatter)
+    logger = logging.getLogger(log_name)
+    logger.addHandler(log_handler)  # note: repeated calls with the same log_name will attach duplicate handlers
+    logger.setLevel(logging.DEBUG)  # log level
+    return logger
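For reference, a minimal usage sketch of the new get_logger helper; the log path and logger name here are illustrative, not from the repo:

    from common_func import get_logger

    # Illustrative path and name; the scripts in this commit log under /data/pythonProject/video-to-word/logs/.
    logger = get_logger(log_file_name="./logs/example.log", log_name="example_logger")
    logger.info("logger ready")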

+ 19 - 3
config/config.yaml

@@ -2,7 +2,23 @@ projectName:
   - '淘特'
   - '支付宝'
 
-# Production database
+
+projectInfoForExportScript:
+  -
+      project_id: (458)
+      project_name: '淘特'
+      channel:
+        - 1 # 汇创思拓 (in-house material)
+        - 0 # 巨量引擎 (Ocean Engine, external material)
+  -
+      project_id: (67,123,42)
+      project_name: '支付宝'
+      channel:
+        - 1 # 汇创思拓 (in-house material)
+        - 0 # 巨量引擎 (Ocean Engine, external material)
+
+
+# Production database: jeecg-boot
 productDB:
   host: 139.186.27.96
   username: data
@@ -10,7 +26,7 @@ productDB:
   port: 3390
   database: jeecg-boot
 
-# Dev/test database
+# Dev/test database: db_ai_word
 devDB:
   host: 139.186.165.84
   username: hcst
@@ -18,7 +34,7 @@ devDB:
   port: 3306
   database: db_ai_word
 
-# Local database
+# Local database: mysql
 localDB:
   host: 192.168.1.193
   username: root
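For context, this is how get_high_quality_script.py (added below) consumes the new projectInfoForExportScript block; note that the parenthesized project_id values parse as plain YAML strings and are substituted verbatim into SQL "in %s" clauses. A minimal sketch:

    import yaml

    with open('config/config.yaml', mode='r', encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)

    for project_info in config['projectInfoForExportScript']:
        # project_id is a string like "(458)" or "(67,123,42)"
        print(project_info['project_id'], project_info['project_name'], project_info['channel'])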

Binary
data/支付宝项目近一周高质量&低质量脚本_2021-09-30.xlsx


Binary
data/淘特项目近一周高质量&低质量脚本_2021-09-30.xlsx


+ 123 - 0
get_high_quality_script.py

@@ -0,0 +1,123 @@
+import datetime
+from datetime import timedelta
+import pandas as pd
+import yaml
+from common_func import get_db_engine, get_logger
+from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
+import traceback
+
+if __name__ == '__main__':
+    # 0 Create the logger
+    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_high_quality_script.log",
+                        log_name='get_high_quality_script_logger')
+    logger.info("get_high_quality_script_logger started! id of logger is: %s" % id(logger))
+
+    # 1 Read the config file to get the database connection info
+    with open('config/config.yaml', mode='r', encoding='utf-8') as f:
+        config = yaml.load(f.read(), Loader=yaml.FullLoader)
+    online_read_engine = get_db_engine(config['productDB'])
+    test_read_engine = get_db_engine(config['devDB'])
+
+    # 2 Fetch material info
+    two_week_date = (datetime.date.today() + timedelta(days=-14)).strftime('%Y-%m-%d')
+    one_week_date = (datetime.date.today() + timedelta(days=-7)).strftime('%Y-%m-%d')
+    today = datetime.date.today().strftime('%Y-%m-%d')
+
+    project_info_lst = config['projectInfoForExportScript']
+    # Iterate project by project
+    for project_info in project_info_lst:
+        try:
+            project_id = project_info['project_id']
+            project_name = project_info['project_name']
+            channel = project_info['channel']
+            logger.info("项目:%s 开始执行!" % project_name)
+
+            writer = pd.ExcelWriter('/data/pythonProject/video-to-word/data/%s项目近一周高质量&低质量脚本_%s.xlsx' % (project_name, today))
+            if 1 in channel:
+                logger.info("项目:%s, 汇创思拓内部素材,开始执行!" % project_name)
+                # 2-1 获取执行项目两周内的素材日报数据 (单天获取后对数据进行拼接,防止一次性取不出数据)
+                full_date_df = pd.DataFrame()
+                for date in pd.date_range(start=two_week_date, freq='D', periods=14):
+                    stat_date = date.strftime('%Y-%m-%d')
+                    sql = """select signature, sum(activation) activation , sum(charge) charge,  stat_date
+                            from ctop_kuaishou_report_daily_material
+                            where account_id in (select account_id from ctop_user_allocation where project_id in %s)
+                            and stat_date = '%s'
+                            group by signature,stat_date""" % (project_id, stat_date)
+                    one_date_df = pd.read_sql(sql, online_read_engine)
+                    full_date_df = full_date_df.append(one_date_df)
+
+                # 2-2 High-quality material: >= 50 cumulative activations over the past week
+                one_week_df = full_date_df[full_date_df.stat_date >= one_week_date]
+                g = one_week_df.groupby('signature').agg({'activation': sum, 'charge': sum})
+                g.reset_index(drop=False, inplace=True)
+                high_material_df = g[g['activation'] >= 50]
+                logger.info("项目:%s, 高质量素材个数为 %s!" % (project_name, len(high_material_df)))
+
+                # 2-3 Low-quality material (failed cold start or in decline): already serving two weeks ago, with 1-10 cumulative activations over the past week -- guarantees a one-week observation window
+                g = full_date_df.groupby('signature')
+                two_week_materials = g.filter(lambda x: x['stat_date'].min() <= one_week_date).signature.unique()
+                one_week_df = full_date_df[(full_date_df.signature.isin(two_week_materials)) & (full_date_df.stat_date >= one_week_date)]
+                g = one_week_df.groupby('signature').agg({'activation': sum, 'charge': sum})
+                g.reset_index(drop=False, inplace=True)
+                low_material_df = g[(g['activation'] <= 10) & (g['activation'] >= 1)]
+
+                # Sample at most 100 low-quality materials
+                n = min(100, len(low_material_df))
+                low_material_df = low_material_df.sample(n=n, random_state=2077)
+                logger.info("项目:%s, 低质量素材个数为 %s!" % (project_name, len(low_material_df)))
+
+                # 2-4 Fetch the video URLs
+                merge_df = pd.concat([low_material_df, high_material_df], axis=0)
+
+                sql = """
+                select url video_url, signature from ctop_kuaishou_video_get 
+                where account_id in (select account_id from ctop_user_allocation where project_id in %s)
+                and signature in %s
+                group by signature
+                """ % (project_id, tuple(merge_df.signature.values),)
+                url_df = pd.read_sql(sql, online_read_engine)
+
+                merge_df = merge_df.merge(url_df, on='signature', how='inner')
+
+                # 2-5 Fetch the script (transcript) data
+                task_df = merge_df[['signature', 'video_url']]
+                get_script_ins = GetScriptFromTengXunYunServer(logger, test_read_engine, task_df, task_ids=None)
+                get_script_ins.submit_task()
+                if get_script_ins.task_ids:
+                    get_script_ins.get_result()
+
+                sql = """
+                select md5 signature,task_result from tb_asr_result where word_text is not null and md5 in %s""" \
+                      % (tuple(merge_df.signature.values),)
+                script_df = pd.read_sql(sql, test_read_engine)
+                script_df.drop_duplicates('signature', keep='first', inplace=True)
+                script_df['script'] = script_df['task_result'].apply(lambda x: eval(x)['Data']['Result'].split(']')[1].strip())  # eval assumes task_result is a Python-literal string; json.loads would be stricter if it is plain JSON
+
+                charge_script_df = script_df.merge(merge_df, on='signature', how='inner')
+                charge_script_df.rename(columns={'charge': '消耗', 'script': '脚本'}, inplace=True)
+
+                charge_script_df[charge_script_df.activation >= 50][['消耗', '脚本']]. \
+                    to_excel(writer, sheet_name='汇创思拓_%s项目近一周高质量素材脚本' % project_name, index=False, header=True)
+                charge_script_df[charge_script_df.activation <= 10][['消耗', '脚本']]. \
+                    to_excel(writer, sheet_name='汇创思拓_%s项目近一周低质量素材脚本' % project_name, index=False, header=True)
+
+            if 0 in channel:
+                logger.info("项目:%s, 巨量引擎外部素材,开始执行!" % project_name)
+                sql = """select signature  from ctop_ai_video_info_from_ocean_engine where project_name = '%s' and stat_date >= '%s'""" \
+                      % (project_name, one_week_date)
+                df = pd.read_sql(sql, test_read_engine)
+                logger.info("项目:%s, 巨量引擎优质素材个数为 %s!" % (project_name, len(df)))
+
+                sql = """select md5 signature,task_result from tb_asr_result where word_text is not null and md5 in %s""" \
+                      % (tuple(df.signature.values),)
+                script_df = pd.read_sql(sql, test_read_engine)
+                script_df.drop_duplicates('signature', keep='first', inplace=True)
+                script_df['script'] = script_df['task_result'].apply(lambda x: eval(x)['Data']['Result'].split(']')[1].strip())
+                script_df.rename(columns={'script': '脚本'}, inplace=True)
+
+                script_df[['脚本']].to_excel(writer, sheet_name='巨量引擎_%s项目近一周高质量素材脚本' % project_name, index=False, header=True)
+            writer.save()
+            logger.info("项目: %s,文件导出完成!" % project_name)
+        except Exception:
+            logger.error("Project %s: exception raised: %s!" % (project_info['project_name'], traceback.format_exc()))

+ 75 - 0
get_script_from_tengxunyun.py

@@ -0,0 +1,75 @@
+import pandas as pd
+from urllib.parse import urlencode
+import requests
+import json
+import time
+from config.url import voice_to_script_task_submit_url, voice_to_script_task_result_url
+
+
+class GetScriptFromTengXunYunServer(object):
+    def __init__(self, logger, db_engine, task_df, task_ids=None):
+        self.logger = logger
+        self.db_engine = db_engine
+        self.task_ids = task_ids
+        self.task_df = task_df
+
+    def submit_task(self):
+        """
+        Submit speech-to-script (ASR) tasks to Tencent Cloud
+        task_df: a DataFrame whose columns include signature and video_url
+        """
+
+        # 1 Fetch the tasks that were already submitted
+        sql = """select md5 signature from tb_asr_result """
+        submitted_task_df = pd.read_sql(sql, self.db_engine)
+
+        # 2 Tasks to submit: drop previously submitted ones to avoid wasting paid service time on duplicates
+        to_submit_task_df = self.task_df[~self.task_df.signature.isin(submitted_task_df.signature.values)]
+        self.logger.info("向腾讯云提交语音转脚本的任务个数为 %s" % len(to_submit_task_df))
+
+        # 3 Send the requests to submit the tasks
+        for index, row in to_submit_task_df.iterrows():
+            material_md5 = row['signature']
+            material_url = row['video_url']
+            request_data = {"md5": material_md5, "url": material_url}
+            request_full_path = voice_to_script_task_submit_url + '?' + urlencode(request_data)
+            request = requests.post(request_full_path)
+            try:
+                result = json.loads(request.text)
+                self.logger.info("Material %s: speech-to-script task submission to Tencent Cloud returned: %s " % (material_md5, result))
+            except Exception:
+                self.logger.error("Material %s: speech-to-script task submission to Tencent Cloud returned: %s " % (material_md5, request.text))
+
+        # 4 Fetch the task_id generated for each submitted material
+        sql = """select task_id from tb_asr_result where md5 in %s""" % (tuple(self.task_df.signature.values),)
+        task_id_df = pd.read_sql(sql, self.db_engine)
+        self.task_ids = list(task_id_df['task_id'].values)
+
+    def get_result(self):
+        """
+        Fetch the scripts from Tencent Cloud
+        Poll once a minute until no task is waiting or running
+        Status Integer task status code: 0 waiting, 1 running, 2 success, 3 failed
+        StatusStr String task status: waiting, doing, success, failed
+        ErrorMsg String reason for failure
+        """
+        self.logger.info("从腾讯云获取脚本的个数为%s" % len(self.task_ids))
+        while True:
+            sql = """select task_id, task_status from tb_asr_result where task_id in %s and task_status in (0,1)""" % \
+                  (tuple(self.task_ids),)
+            task_status_df = pd.read_sql(sql, self.db_engine)
+            if task_status_df.empty:
+                break
+            else:
+                self.logger.info("从腾讯云获取脚本, 休眠一分钟,等待腾讯云任务计算完成。")
+                time.sleep(60 * 1)
+
+            for task_id in task_status_df.task_id.values:
+                request_data = {'task_id': task_id}
+                request_full_path = voice_to_script_task_result_url + '?' + urlencode(request_data)
+                request = requests.post(request_full_path)
+                try:
+                    result = json.loads(request.text)
+                    self.logger.info("task_id:%s, script fetch from Tencent Cloud returned: %s " % (task_id, result))
+                except Exception:
+                    self.logger.error("task_id:%s, script fetch from Tencent Cloud returned: %s " % (task_id, request.text))

+ 72 - 113
get_video_from_ocean_engine.py

@@ -4,17 +4,17 @@ from urllib.parse import urlencode
 import pandas as pd
 import requests
 import yaml
-from common_func import get_db_engine, mysql_replace_into, NpEncoder
+from common_func import get_db_engine, mysql_replace_into, NpEncoder, get_logger
 import datetime
-from config.url import voice_to_script_task_submit_url, voice_to_script_task_result_url, get_material_info_from_ocean_engine_url, \
-    get_video_info_from_ocean_engine_url
-import time
+from config.url import get_material_info_from_ocean_engine_url, get_video_info_from_ocean_engine_url
+import traceback
+from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 
 
 def get_material_info(project_name, period_type):
-    material_df = pd.DataFrame()
+    material_df = pd.DataFrame()  # initialize the returned result
     has_more = True  # whether more pages remain; initialized to True
-    limit = 10  # fetch 30 per page
+    limit = 10  # fetch 10 per page
     page = 1  # page number
     while has_more:
         request_data = {'list_type': 1,
@@ -30,15 +30,23 @@ def get_material_info(project_name, period_type):
                         'page': page,
                         'video_duration_type': 5}
 
-        request_path = get_material_info_from_ocean_engine_url + '?' + urlencode(request_data)
-        request = requests.get(request_path)
-        result = json.loads(request.text)
-        material_page_df = pd.DataFrame(result['data']['materials'])
-        material_df = material_df.append(material_page_df)
-        if result.get('code') == 0 and result.get('data').get('has_more') is True:
-            page += 1
-        else:
-            has_more = False
+        try:
+            request_path = get_material_info_from_ocean_engine_url + '?' + urlencode(request_data)
+            request = requests.get(request_path)
+            result = json.loads(request.text)
+            material_page_df = pd.DataFrame(result['data']['materials'])
+            material_df = material_df.append(material_page_df)
+            if result.get('code') == 0 and result.get('data').get('has_more') is True:
+                page += 1
+                logger.info("project_name:%s, get_material_info_from_ocean_engine 分页获取第 %s 页" % (project_name, page))
+            else:
+                has_more = False
+        except Exception:
+            logger.error("project_name:%s, get_material_info_from_ocean_engine raised an exception while paging: %s" %
+                         (project_name, traceback.format_exc()))
+            has_more = False  # stop paging on error; otherwise the while loop would retry forever
+
+    logger.info("project_name:%s, get_material_info_from_ocean_engine paging complete, %s materials in total!" %
+                (project_name, len(material_df)))
 
     # Normalize data types for easy DB insertion
     # metrics: dict to str
@@ -71,31 +79,39 @@ def get_video_info(vid, project_name):
     :param project_name:
     :return:
     """
-
     video_df = pd.DataFrame()
     # number of videos per request
     cnt_per_request = 10
     # total number of videos
     total_cnt = len(vid)
+    logger.info("project_name:%s, get_video_info_from_ocean_engine,共 %s 个视频需要请求接口获取video_url" % (project_name, total_cnt))
     for i in range(0, total_cnt, cnt_per_request):
         if i + cnt_per_request < total_cnt:
             query_ids = vid[i: i + cnt_per_request]
+            logger.info("project_name:%s, get_video_info_from_ocean_engine 分页获取第 %s 个 到 %s 个视频信息" %
+                        (project_name, i, i + cnt_per_request))
         else:
             query_ids = vid[i:]
+            logger.info("project_name:%s, get_video_info_from_ocean_engine 分页获取第 %s 个 到 %s 个视频信息" %
+                        (project_name, i, total_cnt))
 
-        request_data = {"query_ids": query_ids, "water_mark": "creative_center"}
-        request = requests.post(url=get_video_info_from_ocean_engine_url,
-                                headers={'Content-Type': 'application/json'},
-                                data=json.dumps(request_data, cls=NpEncoder)
-                                )
-        response_data = json.loads(request.text)
-
-        if response_data.get('code') == 0 and response_data.get('data'):
-            for key, value in response_data['data'].items():
-                single_dict = value
-                single_dict['signature'] = key
-                single_df = pd.DataFrame([single_dict])
-                video_df = video_df.append(single_df)
+        try:
+            request_data = {"query_ids": query_ids, "water_mark": "creative_center"}
+            request = requests.post(url=get_video_info_from_ocean_engine_url,
+                                    headers={'Content-Type': 'application/json'},
+                                    data=json.dumps(request_data, cls=NpEncoder)
+                                    )
+            response_data = json.loads(request.text)
+
+            if response_data.get('code') == 0 and response_data.get('data'):
+                for key, value in response_data['data'].items():
+                    single_dict = value
+                    single_dict['signature'] = key
+                    single_df = pd.DataFrame([single_dict])
+                    video_df = video_df.append(single_df)
+        except Exception:
+            logger.error("project_name:%s, get_video_info_from_ocean_engine batch %s raised an exception: %s" %
+                         (project_name, query_ids, traceback.format_exc()))
 
     # Normalize data types for easy DB insertion
     # play_info: list to str
@@ -117,86 +133,23 @@ def get_video_info(vid, project_name):
     return video_df
 
 
-def submit_script_task(df):
-    """
-    Submit speech-to-script tasks to Tencent Cloud
-    :param df: a DataFrame whose columns include signature and url
-    :return: task_ids
-    """
-    # 1 Fetch the tasks that were already submitted
-    sql = """select md5 signature from tb_asr_result """
-    submitted_task_df = pd.read_sql(sql, read_engine)
-
-    # 2 Tasks to submit: drop previously submitted ones to avoid wasting paid service time on duplicates
-    to_submit_task_df = df[~df.signature.isin(submitted_task_df.signature.values)]
-
-    # 3 Send the requests to submit the tasks
-    for index, row in to_submit_task_df.iterrows():
-        material_md5 = row['signature']
-        material_url = row['video_url']
-        request_data = {"md5": material_md5, "url": material_url}
-        request_full_path = voice_to_script_task_submit_url + '?' + urlencode(request_data)
-        request = requests.post(request_full_path)
-        try:
-            result = json.loads(request.text)
-            print(result)
-        except:
-            print("error", request.text)
-
-    # 4 Fetch the task_id generated for each submitted material
-    sql = """
-    select task_id from tb_asr_result where md5 in %s
-    """ % (tuple(df.signature.values),)
-    task_id_df = pd.read_sql(sql, read_engine)
-    task_ids = task_id_df['task_id'].values
-    return task_ids
-
-
-def get_result_from_tx(task_id_lst):
-    """
-    Fetch the scripts from Tencent Cloud
-    Poll every 5 minutes until no task is waiting or running
-
-    Status Integer task status code: 0 waiting, 1 running, 2 success, 3 failed
-    StatusStr String task status: waiting, doing, success, failed
-    ErrorMsg String reason for failure
-    """
-    while True:
-        print("sleep 5 mins")
-        time.sleep(60 * 5)
-
-        sql = """select task_id, task_status from tb_asr_result where task_id in %s and task_status in (0,1)""" % (tuple(task_id_lst),)
-        task_status_df = pd.read_sql(sql, read_engine)
-        if task_status_df.empty:
-            break
-
-        for task_id in task_status_df.task_id.values:
-            request_data = {'task_id': task_id}
-            request_full_path = voice_to_script_task_result_url + '?' + urlencode(request_data)
-            request = requests.post(request_full_path)
-            try:
-                result = json.loads(request.text)
-                print(task_id, result['status'])
-            except:
-                print("error", task_id, request.text)
-
-
 if __name__ == '__main__':
+    # Create the logger
+    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_video_from_ocean_engine.log",
+                        log_name='get_video_from_ocean_engine_logger')
+    logger.info("get_video_from_ocean_engine started! id of logger is: %s" % id(logger))
+
     # 1 Read the config file
     with open('config/config.yaml', mode='r', encoding='utf-8') as f:
         config = yaml.load(f.read(), Loader=yaml.FullLoader)
 
     # 1-1 Database connection engines, switched between dev / production environments
     # The read engine uses the production DB; the write engine switches with the environment (test / production DB)
-    # Note: this project reads and writes the test database
-    if os.getenv('LYY_DEV', 'unknown') == 'dev':
-        write_engine = get_db_engine(config['devDB'])
-    else:
-        write_engine = get_db_engine(config['devDB'])
-
+    # Note: this project reads and writes the test database
+    write_engine = get_db_engine(config['devDB'])
     read_engine = get_db_engine(config['devDB'])
 
-    # 1-2 Rows written to the DB per insert
+    # 1-2 Rows per batch when writing to the DB
     chunk_size = config['chunkSize']
 
     # 1-3 Read the project list
@@ -204,16 +157,22 @@ if __name__ == '__main__':
 
     # 2 Fetch 巨量引擎 (Ocean Engine) data project by project
     for project in project_name_lst:
-        # 2-1 Fetch the material list and write it to the DB
-        material_info_df = get_material_info(project, 7)
-
-        # 2-2 Fetch URLs by signature and write them to the DB
-        vid_lst = material_info_df['signature'].values
-        video_info_df = get_video_info(vid_lst, project)
-
-        # 2-3 Submit speech-to-script tasks to Tencent Cloud
-        task_df = video_info_df[['signature', 'video_url']]
-        task_ids = submit_script_task(task_df)
-
-        # 2-4 Fetch the scripts of submitted tasks from Tencent Cloud
-        get_result_from_tx(task_ids)
+        try:
+            logger.info("****************************** %s 项目开始执行 ******************************" % project)
+            # 2-1 获取物料列表并写入数据库
+            material_info_df = get_material_info(project, 7)
+
+            # 2-2 Fetch URLs by signature and write them to the DB
+            vid_lst = material_info_df['signature'].values
+            video_info_df = get_video_info(vid_lst, project)
+
+            # 2-3 Submit speech-to-script tasks to Tencent Cloud
+            task_df = video_info_df[['signature', 'video_url']]
+            get_script_ins = GetScriptFromTengXunYunServer(logger, read_engine, task_df, task_ids=None)
+            get_script_ins.submit_task()
+            if get_script_ins.task_ids:
+                get_script_ins.get_result()
+
+            logger.info("****************************** %s 项目执行完成 ******************************" % project)
+        except:
+            logger.error("project_name: %s, 发生异常信息: %s" % (project, traceback.format_exc()))

+ 0 - 0
logs/.logtest