liyuyi@c-top.com.cn 4 years ago
parent commit ff8607ece6
4 changed files with 18 additions and 132 deletions
  1. +0  -0    .idea/video_to_word.iml
  2. +0  -125  get_high_quality_script.py
  3. +6  -2    get_script_from_tengxunyun.py
  4. +12 -5    get_material_and_script_by_query_word.py

.idea/video-to-word.iml → .idea/video_to_word.iml


+ 0 - 125
get_high_quality_script.py

@@ -1,125 +0,0 @@
-import datetime
-from datetime import timedelta
-import pandas as pd
-import yaml
-from common_func import get_db_engine, get_logger
-from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
-import traceback
-
-if __name__ == '__main__':
-    # 0 Create the logger object
-    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_high_quality_script.log",
-                        log_name='get_high_quality_script_logger')
-    logger.info("get_high_quality_script_logger started! id of logger is: %s" % id(logger))
-
-    # 1 Read the config file to get the database connection info
-    with open('/data/pythonProject/video-to-word/config/config.yaml', mode='r', encoding='utf-8') as f:
-        config = yaml.load(f.read(), Loader=yaml.FullLoader)
-    online_read_engine = get_db_engine(config['productDB'])
-    test_read_engine = get_db_engine(config['devDB'])
-
-    # 2 Get the material info
-    two_week_date = (datetime.date.today() + timedelta(days=-14)).strftime('%Y-%m-%d')
-    one_week_date = (datetime.date.today() + timedelta(days=-7)).strftime('%Y-%m-%d')
-    today = datetime.date.today().strftime('%Y-%m-%d')
-
-    project_info_lst = config['projectInfoForExportScript']
-    # Iterate project by project
-    for project_info in project_info_lst:
-        try:
-            project_id = project_info['project_id']
-            project_name = project_info['project_name']
-            channel = project_info['channel']
-            logger.info("项目:%s 开始执行!" % project_name)
-
-            writer = pd.ExcelWriter('/data/pythonProject/video-to-word/data/%s项目近一周高质量&低质量脚本_%s.xlsx' % (project_name, today))
-            if 1 in channel:
-                logger.info("项目:%s, 汇创思拓内部素材,开始执行!" % project_name)
-                # 2-1 Get the project's daily material report data for the past two weeks (fetch day by day and concatenate, in case a single query cannot return everything at once)
-                full_date_df = pd.DataFrame()
-                for date in pd.date_range(start=two_week_date, freq='D', periods=14):
-                    stat_date = date.strftime('%Y-%m-%d')
-                    sql = """select signature, sum(activation) activation , sum(charge) charge,  stat_date
-                            from ctop_kuaishou_report_daily_material
-                            where account_id in (select account_id from ctop_user_allocation where project_id in %s)
-                            and stat_date = '%s'
-                            group by signature,stat_date""" % (project_id, stat_date)
-                    one_date_df = pd.read_sql(sql, online_read_engine)
-                    full_date_df = full_date_df.append(one_date_df)
-
-                # 2-2 Get high-quality materials (cumulative activations in the past week >= 50)
-                one_week_df = full_date_df[full_date_df.stat_date >= one_week_date]
-                g = one_week_df.groupby('signature').agg({'activation': sum, 'charge': sum})
-                g.reset_index(drop=False, inplace=True)
-                high_material_df = g[g['activation'] >= 50]
-                logger.info("项目:%s, 高质量素材个数为 %s!" % (project_name, len(high_material_df)))
-
-                # 2-3 Get low-quality materials (cold start failed or declining): already delivering during the previous two weeks, with 1 to 10 cumulative activations in the past week -- ensures each material has had a one-week performance window
-                g = full_date_df.groupby('signature')
-                two_week_materials = g.filter(lambda x: x['stat_date'].min() <= one_week_date).signature.unique()
-                one_week_df = full_date_df[(full_date_df.signature.isin(two_week_materials)) & (full_date_df.stat_date >= one_week_date)]
-                g = one_week_df.groupby('signature').agg({'activation': sum, 'charge': sum})
-                g.reset_index(drop=False, inplace=True)
-                low_material_df = g[(g['activation'] <= 10) & (g['activation'] >= 1)]
-
-                # Sample 100 of the low-quality materials
-                n = 100 if len(low_material_df) > 100 else len(low_material_df)
-                low_material_df = low_material_df.sample(n=n, random_state=2077)
-                logger.info("项目:%s, 低质量素材个数为 %s!" % (project_name, len(low_material_df)))
-
-                # 2-4 Get the video url
-                merge_df = pd.concat([low_material_df, high_material_df], axis=0)
-
-                sql = """
-                select url video_url, signature from ctop_kuaishou_video_get 
-                where account_id in (select account_id from ctop_user_allocation where project_id in %s)
-                and signature in %s
-                group by signature
-                """ % (project_id, tuple(merge_df.signature.values),)
-                url_df = pd.read_sql(sql, online_read_engine)
-
-                merge_df = merge_df.merge(url_df, on='signature', how='inner')
-
-                # 2-5 Get the script data
-                task_df = merge_df[['signature', 'video_url']]
-                get_script_ins = GetScriptFromTengXunYunServer(logger, test_read_engine, task_df, task_ids=None)
-                get_script_ins.submit_task()
-                if get_script_ins.task_ids:
-                    get_script_ins.get_result()
-
-                sql = """
-                select md5 signature,task_result from tb_asr_result where word_text is not null and md5 in %s""" \
-                      % (tuple(merge_df.signature.values),)
-                script_df = pd.read_sql(sql, test_read_engine)
-                script_df.drop_duplicates('signature', keep='first', inplace=True)
-                script_df['script'] = script_df['task_result'].apply(lambda x: eval(x)['Data']['Result'].split(']')[1].strip())
-
-                charge_script_df = script_df.merge(merge_df, on='signature', how='inner')
-                charge_script_df.rename(columns={'charge': '消耗', 'script': '脚本'}, inplace=True)
-
-                charge_script_df[charge_script_df.activation >= 50][['消耗', '脚本', 'video_url']]. \
-                    to_excel(writer, sheet_name='汇创思拓_%s项目近一周高质量素材脚本' % project_name, index=False, header=True)
-                charge_script_df[charge_script_df.activation <= 10][['消耗', '脚本', 'video_url']]. \
-                    to_excel(writer, sheet_name='汇创思拓_%s项目近一周低质量素材脚本' % project_name, index=False, header=True)
-
-            if 0 in channel:
-                logger.info("项目:%s, 巨量引擎外部素材,开始执行!" % project_name)
-                sql = """select signature, video_url  from ctop_ai_video_info_from_ocean_engine 
-                where project_name = '%s' and stat_date >= '%s'""" % (project_name, one_week_date)
-                df = pd.read_sql(sql, test_read_engine)
-                logger.info("项目:%s, 巨量引擎优质素材个数为 %s!" % (project_name, len(df)))
-
-                sql = """select md5 signature,task_result from tb_asr_result where word_text is not null and md5 in %s""" \
-                      % (tuple(df.signature.values),)
-                script_df = pd.read_sql(sql, test_read_engine)
-                script_df.drop_duplicates('signature', keep='first', inplace=True)
-                script_df['script'] = script_df['task_result'].apply(lambda x: eval(x)['Data']['Result'].split(']')[1].strip())
-                script_df.rename(columns={'script': '脚本'}, inplace=True)
-                merge_df = script_df.merge(df, on='signature', how='inner')
-                merge_df[['脚本', 'video_url']].to_excel(writer, sheet_name='巨量引擎_%s项目近一周高质量素材脚本' % project_name,
-                                                       index=False,
-                                                       header=True)
-            writer.save()
-            logger.info("项目: %s,文件导出完成!" % project_name)
-        except:
-            logger.error("项目:%s, 发生异常信息为%s!" % (project_info['project_name'], traceback.format_exc()))
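
For reference, the selection rule the deleted script implemented boils down to a short pandas sketch (a minimal illustration only: the signature/activation/charge/stat_date columns and the >=50 / 1-10 activation thresholds come from the code above, while the sample data is invented):

import pandas as pd

# Hypothetical daily material report mirroring the columns used above.
full_date_df = pd.DataFrame({
    'signature': ['a', 'a', 'b', 'b', 'c'],
    'activation': [30, 40, 2, 3, 0],
    'charge': [100.0, 120.0, 5.0, 6.0, 0.0],
    'stat_date': ['2021-07-01', '2021-07-05', '2021-06-25', '2021-07-02', '2021-07-03'],
})
one_week_date = '2021-07-01'

# High quality: at least 50 cumulative activations within the last week.
one_week_df = full_date_df[full_date_df.stat_date >= one_week_date]
g = one_week_df.groupby('signature', as_index=False).agg({'activation': 'sum', 'charge': 'sum'})
high_material_df = g[g['activation'] >= 50]

# Low quality: already delivering before the last week, yet only 1-10
# activations inside it (cold start failed or in decline).
started_earlier = full_date_df.groupby('signature').filter(
    lambda x: x['stat_date'].min() <= one_week_date).signature.unique()
recent_df = full_date_df[full_date_df.signature.isin(started_earlier)
                         & (full_date_df.stat_date >= one_week_date)]
g = recent_df.groupby('signature', as_index=False).agg({'activation': 'sum', 'charge': 'sum'})
low_material_df = g[(g['activation'] >= 1) & (g['activation'] <= 10)]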

+ 6 - 2
get_script_from_tengxunyun.py

@@ -54,14 +54,16 @@ class GetScriptFromTengXunYunServer(object):
         ErrorMsg String Description of the reason for failure.
         """
         self.logger.info("从腾讯云获取脚本的个数为%s" % len(self.task_ids))
-        while True:
+        retry_upper_cnt = 10
+        retry_cnt = 1
+        while retry_cnt <= retry_upper_cnt:
             sql = """select task_id, task_status from tb_asr_result where task_id in %s and task_status in (0,1)""" % \
                   (tuple(self.task_ids),)
             task_status_df = pd.read_sql(sql, self.db_engine)
             if task_status_df.empty:
                 break
             else:
-                self.logger.info("从腾讯云获取脚本, 休眠一分钟,等待腾讯云任务计算完成。")
+                self.logger.info("从腾讯云获取脚本, 休眠10s,等待腾讯云任务计算完成。")
                 time.sleep(10 * 1)
 
             for task_id in task_status_df.task_id.values:
@@ -73,3 +75,5 @@ class GetScriptFromTengXunYunServer(object):
                     self.logger.info("task_id:%s, 从腾讯云获取脚本返回信息为:%s " % (task_id, result))
                 except:
                     self.logger.error("task_id:%s, 从腾讯云获取脚本返回信息为:%s " % (task_id, request.text))
+
+            retry_cnt += 1
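
The change above turns an unbounded `while True` poll into a loop capped at ten rounds with a 10-second sleep between polls. The same pattern in isolation (a sketch only; `poll_pending` is a hypothetical stand-in for the tb_asr_result status query):

import time

def wait_for_tasks(poll_pending, retry_upper_cnt=10, sleep_seconds=10):
    # Poll until no task is pending, but give up after retry_upper_cnt rounds
    # so a task stuck on the ASR side cannot block the job forever.
    retry_cnt = 1
    while retry_cnt <= retry_upper_cnt:
        pending = poll_pending()  # e.g. task_ids still in status 0 or 1
        if not pending:
            break
        time.sleep(sleep_seconds)
        retry_cnt += 1

With the original `while True`, a single task that never left status 0/1 would have kept the exporter sleeping indefinitely; the cap bounds the wait to roughly retry_upper_cnt * sleep_seconds seconds.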

+ 12 - 5
get_material_and_script_by_query_word.py

@@ -1,16 +1,23 @@
 import datetime
+import os
+import sys
 from datetime import timedelta
 
 import pandas as pd
 import yaml
 
+curr_path = os.path.abspath(os.path.dirname(__file__))
+project_root_path = curr_path[:curr_path.find("video-to-word") + len("video-to-word")]
+sys.path.append(project_root_path)
+
 from common_func import get_db_engine, get_logger
 from config.url import toutiao_static_video_url
 from get_material_and_script.get_material_from_kuaishou_kaiyan import GetMaterialFromKuaishouKaiyan
 from get_material_and_script.get_material_from_ocean_engine import GetMaterialFromOceanEngine
-from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
+from get_material_and_script.get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 
 if __name__ == '__main__':
+    print("test end")
     # Create the logger object
     logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_high_quality_material.log",
                         log_name='get_high_quality_material_logger')
@@ -63,10 +70,10 @@ if __name__ == '__main__':
                 if project_id_lst:
                     if is_new_project_query_word == 1:
                         # For newly added query words, the time range for querying internal high-quality materials is 3 months
-                        start_date = (datetime.date.today() + timedelta(days=-30 * 3)).strftime('%Y-%m-%d')
+                        # TODO periods=18 needs to be changed
+                        start_date = (datetime.date.today() + timedelta(days=-18)).strftime('%Y-%m-%d')
                         full_date_df = pd.DataFrame()
-                        # TODO for testing use periods=3; outside of testing use periods=30 * 3
-                        for date in pd.date_range(start=start_date, freq='D', periods=30 * 3):
+                        for date in pd.date_range(start=start_date, freq='D', periods=18):
                             stat_date = date.strftime('%Y-%m-%d')
                             sql = """select signature, sum(activation) activation , sum(charge) charge,  stat_date
                                                     from ctop_kuaishou_report_daily_material
@@ -128,7 +135,7 @@ if __name__ == '__main__':
                     full_high_material_df = full_high_material_df.append(inst.video_df)
                 else:
                     # TODO add logging
-                    pass
+                    logger.info("查询词:%s,快手开眼快创没有对应的素材!" % query_word)
 
             if source_code == 2:
                 # Get high-quality materials from Toutiao's Ocean Engine; the query_time_range parameter is required
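
The sys.path lines added at the top of get_material_and_script_by_query_word.py exist so that the package-style import `from get_material_and_script.get_script_from_tengxunyun import ...` resolves even when the script is started from a different working directory (for example by cron). A commented sketch of that idea, assuming the project folder is named video-to-word as above:

import os
import sys

# Directory containing this file, e.g. /data/pythonProject/video-to-word/some_pkg
curr_path = os.path.abspath(os.path.dirname(__file__))

# Cut the path off right after the project folder name so the project root
# (/data/pythonProject/video-to-word) ends up on sys.path.
project_root_path = curr_path[:curr_path.find("video-to-word") + len("video-to-word")]
if project_root_path not in sys.path:
    sys.path.append(project_root_path)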