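"""Export per-project Excel reports of high- and low-quality ad-material scripts.

For each configured project, this script pulls two weeks of daily Kuaishou
material stats, tags materials as high quality (>= 50 activations in the past
week) or low quality (1-10 activations after at least a week on air), fetches
the video urls, transcribes them via the Tencent Cloud ASR helper, and writes
the scripts to one Excel workbook per project.
"""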
import datetime
import json
import traceback
from datetime import timedelta

import pandas as pd
import yaml

from common_func import get_db_engine, get_logger
from get_script_from_tengxunyun import GetScriptFromTengXunYunServer
 
if __name__ == '__main__':
    # 0 Create the logger
    logger = get_logger(log_file_name="/data/pythonProject/video-to-word/logs/get_high_quality_script.log",
                        log_name='get_high_quality_script_logger')
    logger.info("get_high_quality_script_logger started! id of logger is: %s" % id(logger))
 
    # 1 Read the config file to get the database connection info
    with open('/data/pythonProject/video-to-word/config/config.yaml', mode='r', encoding='utf-8') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    online_read_engine = get_db_engine(config['productDB'])
    test_read_engine = get_db_engine(config['devDB'])
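    # Expected config.yaml shape (inferred from the keys used in this script):
    #   productDB / devDB: connection settings accepted by get_db_engine
    #   projectInfoForExportScript: list of {project_id, project_name, channel},
    #     where project_id must format as a valid SQL "in" list (e.g. a tuple)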
 
    # 2 Fetch material info
    two_week_date = (datetime.date.today() + timedelta(days=-14)).strftime('%Y-%m-%d')
    one_week_date = (datetime.date.today() + timedelta(days=-7)).strftime('%Y-%m-%d')
    today = datetime.date.today().strftime('%Y-%m-%d')
    project_info_lst = config['projectInfoForExportScript']
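    # channel controls which sources are exported for a project:
    #   1 -> in-house (汇创思拓) Kuaishou materials from the production DB
    #   0 -> external Ocean Engine (巨量引擎) materials from the dev DB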
 
    # Iterate over the projects
    for project_info in project_info_lst:
        try:
            project_id = project_info['project_id']
            project_name = project_info['project_name']
            channel = project_info['channel']
            logger.info("project %s: started!" % project_name)
            writer = pd.ExcelWriter('/data/pythonProject/video-to-word/data/%s项目近一周高质量&低质量脚本_%s.xlsx'
                                    % (project_name, today))
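            # One workbook per project; writer.close() at the end of the loop body
            # flushes it to disk (a "with pd.ExcelWriter(...) as writer:" block would also work)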
 
            if 1 in channel:
                logger.info("project %s: in-house (汇创思拓) material export started!" % project_name)
                # 2-1 Pull the project's daily material report for the past two weeks,
                # one day at a time, then concatenate (avoids a single oversized query)
                full_date_df = pd.DataFrame()
                for date in pd.date_range(start=two_week_date, freq='D', periods=14):
                    stat_date = date.strftime('%Y-%m-%d')
                    sql = """select signature, sum(activation) activation, sum(charge) charge, stat_date
                            from ctop_kuaishou_report_daily_material
                            where account_id in (select account_id from ctop_user_allocation where project_id in %s)
                            and stat_date = '%s'
                            group by signature, stat_date""" % (project_id, stat_date)
                    one_date_df = pd.read_sql(sql, online_read_engine)
                    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported equivalent
                    full_date_df = pd.concat([full_date_df, one_date_df], ignore_index=True)
 
                # 2-2 High-quality materials: >= 50 cumulative activations in the past week
                one_week_df = full_date_df[full_date_df.stat_date >= one_week_date]
                g = one_week_df.groupby('signature').agg({'activation': 'sum', 'charge': 'sum'})
                g.reset_index(drop=False, inplace=True)
                high_material_df = g[g['activation'] >= 50]
                logger.info("project %s: %s high-quality materials!" % (project_name, len(high_material_df)))
 
                # 2-3 Low-quality materials (failed cold start or in decline): already live
                # before the past week began, with 1-10 cumulative activations in the past
                # week -- this guarantees each material has had a full week on air
                g = full_date_df.groupby('signature')
                two_week_materials = g.filter(lambda x: x['stat_date'].min() <= one_week_date).signature.unique()
                one_week_df = full_date_df[(full_date_df.signature.isin(two_week_materials)) & (full_date_df.stat_date >= one_week_date)]
                g = one_week_df.groupby('signature').agg({'activation': 'sum', 'charge': 'sum'})
                g.reset_index(drop=False, inplace=True)
                low_material_df = g[(g['activation'] <= 10) & (g['activation'] >= 1)]
                # Sample at most 100 low-quality materials (fixed seed for reproducibility)
                n = min(len(low_material_df), 100)
                low_material_df = low_material_df.sample(n=n, random_state=2077)
                logger.info("project %s: %s low-quality materials!" % (project_name, len(low_material_df)))
 
                # 2-4 Fetch the video url for each selected material
                merge_df = pd.concat([low_material_df, high_material_df], axis=0)
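                # Note: "%s" % tuple(...) relies on the Python tuple repr being valid SQL;
                # a single-element tuple renders as "('x',)", whose trailing comma MySQL rejects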
 
                sql = """
                select url video_url, signature from ctop_kuaishou_video_get
                where account_id in (select account_id from ctop_user_allocation where project_id in %s)
                and signature in %s
                group by signature
                """ % (project_id, tuple(merge_df.signature.values))
                url_df = pd.read_sql(sql, online_read_engine)
                merge_df = merge_df.merge(url_df, on='signature', how='inner')
 
                # 2-5 Fetch the script (transcript) data
                task_df = merge_df[['signature', 'video_url']]
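                # Submit the videos to the Tencent Cloud ASR helper; finished transcripts
                # are read back from tb_asr_result below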
 
                get_script_ins = GetScriptFromTengXunYunServer(logger, test_read_engine, task_df, task_ids=None)
                get_script_ins.submit_task()
                if get_script_ins.task_ids:
                    get_script_ins.get_result()
 
                sql = """
                select md5 signature, task_result from tb_asr_result where word_text is not null and md5 in %s""" \
                      % (tuple(merge_df.signature.values),)
                script_df = pd.read_sql(sql, test_read_engine)
                script_df.drop_duplicates('signature', keep='first', inplace=True)
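                # Assumed payload shape: {"Data": {"Result": "[start,end] text ..."}};
                # split(']')[1] keeps the text right after the first timestamp tag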
 
                # task_result is assumed to be a JSON string; json.loads is safer than eval here
                script_df['script'] = script_df['task_result'].apply(lambda x: json.loads(x)['Data']['Result'].split(']')[1].strip())
 
                charge_script_df = script_df.merge(merge_df, on='signature', how='inner')
                charge_script_df.rename(columns={'charge': '消耗', 'script': '脚本'}, inplace=True)
                charge_script_df[charge_script_df.activation >= 50][['消耗', '脚本', 'video_url']]. \
                    to_excel(writer, sheet_name='汇创思拓_%s项目近一周高质量素材脚本' % project_name, index=False, header=True)
                charge_script_df[charge_script_df.activation <= 10][['消耗', '脚本', 'video_url']]. \
                    to_excel(writer, sheet_name='汇创思拓_%s项目近一周低质量素材脚本' % project_name, index=False, header=True)
 
            if 0 in channel:
                logger.info("project %s: Ocean Engine (巨量引擎) external material export started!" % project_name)
                sql = """select signature, video_url from ctop_ai_video_info_from_ocean_engine
                where project_name = '%s' and stat_date >= '%s'""" % (project_name, one_week_date)
                df = pd.read_sql(sql, test_read_engine)
                logger.info("project %s: %s high-quality Ocean Engine materials!" % (project_name, len(df)))
                sql = """select md5 signature, task_result from tb_asr_result where word_text is not null and md5 in %s""" \
                      % (tuple(df.signature.values),)
                script_df = pd.read_sql(sql, test_read_engine)
                script_df.drop_duplicates('signature', keep='first', inplace=True)
                script_df['script'] = script_df['task_result'].apply(lambda x: json.loads(x)['Data']['Result'].split(']')[1].strip())
                script_df.rename(columns={'script': '脚本'}, inplace=True)
                merge_df = script_df.merge(df, on='signature', how='inner')
                merge_df[['脚本', 'video_url']].to_excel(writer, sheet_name='巨量引擎_%s项目近一周高质量素材脚本' % project_name,
                                                       index=False,
                                                       header=True)
 
            # close() flushes the workbook to disk (ExcelWriter.save was removed in pandas 2.0)
            writer.close()
            logger.info("project %s: file export finished!" % project_name)
        except Exception:
            logger.error("project %s: exception occurred: %s" % (project_info['project_name'], traceback.format_exc()))
 
 