get_similar_material.py

import os
import yaml
from common_func import get_db_engine
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
import numpy as np
import datetime
import sys
import traceback
import logging
from concurrent_log import ConcurrentTimedRotatingFileHandler

log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y/%m/%d %I:%M:%S %p')
log_handler = ConcurrentTimedRotatingFileHandler(
    "/data/pythonProject/similar_material/logs/get_similar_material.log",
    when="midnight",
    backupCount=100)
log_handler.setFormatter(log_formatter)
logger = logging.getLogger('historical_material_logger')
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
logger.info("historical_material_logger started!")
print('id of historical_material_logger %s' % id(logger))
def scaler_feature(data, axis=0):
    """
    Standardize features: z = (x - u) / s
    :param data: pd.DataFrame
    :param axis: 0 - row-wise standardization, 1 - column-wise standardization
    :return: pd.DataFrame of standardized values, index preserved, columns suffixed with '_scaler'
    """
    scaler = StandardScaler()
    scaler_data = None
    if axis == 0:
        scaler_data = scaler.fit_transform(data.T).T
    if axis == 1:
        scaler_data = scaler.fit_transform(data)
    scaler_df = pd.DataFrame(scaler_data)
    scaler_df.index = data.index
    scaler_df.columns = [col + '_scaler' for col in data.columns]
    return scaler_df
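
# Illustrative example (not part of the pipeline): row-wise standardization
# rescales each material's own feature vector to zero mean and unit variance,
# so materials are compared by the shape of their profile rather than its scale:
#   scaler_feature(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), axis=0)
#   -> every row becomes [-1.0, 1.0], with columns ['a_scaler', 'b_scaler']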
def get_segment_features(table, signature, dim_col, project_id, col_prefix):
    # 1. Fetch the raw data
    get_org_data_sql = """
    select signature, aclick, bclick, activation, charge, %s from %s
    where signature in %s and project_id = %s
    """ % (dim_col, table, tuple(signature), project_id)
    org_df = pd.read_sql(get_org_data_sql, read_engine)

    # 2. Compute per-segment action rate (bclick / aclick), activation rate
    #    (activation / bclick) and activation cost (charge / activation);
    #    row-wise standardization happens later in step 5
    g = org_df.groupby(['signature', dim_col]).agg({'activation': 'sum', 'aclick': 'sum', 'bclick': 'sum', 'charge': 'sum'}).unstack()
    # Segment values under this dimension, e.g. gender has 男 (male), 女 (female) and 其他 (other).
    # '其他' is dropped: its business meaning is unclear, so the model's behaviour on it cannot be explained.
    segment_cols = list(g['charge'].columns)
    if '其他' in segment_cols:
        segment_cols.remove('其他')
    segment_fea_df = pd.DataFrame()
    for col in segment_cols:
        segment_fea_df[col + '_bclick_aclick_pct'] = g[('bclick', col)] / g[('aclick', col)]
        segment_fea_df[col + '_activation_bclick_pct'] = g[('activation', col)] / g[('bclick', col)]
        segment_fea_df[col + '_cost'] = g[('charge', col)] / g[('activation', col)]

    # 3. Compute each segment's share of the cumulative activations
    for col in segment_cols:
        segment_fea_df[col + '_activation_pct'] = g[('activation', col)] / (g[[('activation', item) for item in segment_cols]].sum(axis=1))

    # 4. Handle np.nan and np.inf
    # 4-1 cost features
    # np.inf : numerator > 0 / denominator = 0 — spend but no activations; fill with 999
    # np.nan : numerator = 0 / denominator = 0 — no spend and no activations; fill with -1
    cost_cols = [col for col in segment_fea_df.columns if 'cost' in col]
    segment_fea_df[cost_cols] = segment_fea_df[cost_cols].replace({np.nan: -1, np.inf: 999})
    # 4-2 ratio features
    # np.inf : numerator > 0 / denominator = 0 — e.g. an action rate with clicks but no
    #          impressions is a data bug; drop the material and log it
    # np.nan : numerator = 0 / denominator = 0 — e.g. no activations and no clicks; fill with 0
    pct_cols = [col for col in segment_fea_df.columns if 'pct' in col]
    segment_fea_df[pct_cols] = segment_fea_df[pct_cols].replace({np.nan: 0})
    inf_df = np.isinf(segment_fea_df[pct_cols]).sum(axis=1)
    remove_sig_lst = inf_df[inf_df >= 1].index
    if len(remove_sig_lst):
        logger.error('Abnormal audience report data for some materials (e.g. clicks > 0 but '
                     'impressions = 0); the following are excluded from the similarity computation: %s' % remove_sig_lst)
        print(remove_sig_lst)
        segment_fea_df = segment_fea_df[~segment_fea_df.index.isin(remove_sig_lst)]

    # 5. Row-wise standardization of the features
    segment_fea_scaler_df = pd.DataFrame()
    for col in ['bclick_aclick_pct', 'activation_bclick_pct', 'cost', 'activation_pct']:
        tmp_df = segment_fea_df.filter(like=col)
        scaler_df = scaler_feature(tmp_df, axis=0)
        segment_fea_scaler_df = pd.concat([segment_fea_scaler_df, scaler_df], axis=1)

    # 6. Apply col_prefix; the signature index is already in place (it was carried
    #    through from segment_fea_df by scaler_feature), and re-assigning g.index here
    #    would raise a length mismatch whenever materials were removed in step 4-2
    segment_fea_scaler_df.columns = [col_prefix + col for col in segment_fea_scaler_df.columns]
    return segment_fea_scaler_df
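
# Illustrative shape of the output (assuming a gender dimension with segments
# 男 and 女): columns come out as e.g. 'gender_男_cost_scaler',
# 'gender_女_bclick_aclick_pct_scaler', ... — one row per signature, with each
# feature family standardized within the material across its segments.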
def get_signature_features(signature, project_id):
    """
    Compute each material's overall action rate, activation rate and activation cost.
    :param project_id:
    :param signature:
    :return:
    """
    get_data_sql = """ select signature, sum(aclick) aclick, sum(bclick) bclick, sum(activation) activation, sum(charge) charge
    from ctop_kuaishou_audience_report_daily_age_material
    where signature in %s and project_id = %s
    group by signature
    """ % (tuple(signature), project_id)
    sig_org_df = pd.read_sql(get_data_sql, read_engine)
    sig_fea_df = pd.DataFrame()
    sig_fea_df['cost'] = sig_org_df['charge'] / sig_org_df['activation']
    sig_fea_df['bclick_aclick_pct'] = sig_org_df['bclick'] / sig_org_df['aclick']
    sig_fea_df['activation_bclick_pct'] = sig_org_df['activation'] / sig_org_df['bclick']
    # Column-wise standardization of the features
    sig_fea_df.index = sig_org_df['signature'].values
    sig_fea_scaler_df = scaler_feature(sig_fea_df, axis=1)
    return sig_fea_scaler_df
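
# Illustrative call (hypothetical signature values): for materials ['abc', 'def']
# in project 123, this returns a DataFrame indexed by signature with columns
# ['cost_scaler', 'bclick_aclick_pct_scaler', 'activation_bclick_pct_scaler'],
# each standardized across materials:
#   get_signature_features(['abc', 'def'], 123)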
if __name__ == '__main__':
    try:
        # 1. Read the config file
        with open('/data/pythonProject/similar_material/config/config.yaml', mode='r', encoding='utf-8') as f:
            config = yaml.load(f.read(), Loader=yaml.FullLoader)
        # 1-1 Database engines, switched by environment (dev / production):
        # reads always go to the production database; writes go to the test or
        # production database depending on the environment
        if os.getenv('LYY_DEV', 'unknown') == 'dev':
            write_engine = get_db_engine(config['devDB'])
        else:
            write_engine = get_db_engine(config['productDB'])
        read_engine = get_db_engine(config['productDB'])

        # 2. Compute similar materials per project
        project_id_lst = config['projectId']
        for project_id in project_id_lst:
            # Fetch all historically qualifying materials — tentatively those with at least 100 activations
            get_sig_sql = """select signature, sum(activation) activation
            from ctop_kuaishou_audience_report_daily_age_material
            where project_id = %s
            group by signature
            having sum(activation) >= 100
            """ % project_id
            sig_df = pd.read_sql(get_sig_sql, read_engine)
            sig_df = sig_df[~sig_df.signature.isnull()]
            sig_lst = list(sig_df.signature.values)
            if not sig_lst:
                logger.info("No qualifying materials — exiting!")
                sys.exit(0)
            if len(sig_lst) == 1:
                logger.info("Only one qualifying material — cannot compute similar materials for it, exiting!")
                sys.exit(1)

            # 3. Build the features
            # 3-1 Overall per-material features: action rate, activation rate, activation cost
            sig_feature_df = get_signature_features(sig_lst, project_id)
            # 3-2 Per-dimension (age, gender), per-segment (male, female, ...) action rate,
            #     activation rate, activation cost and activation share, standardized row-wise
            # Each element of table_dim_tuple: table name, segment column name, feature-name prefix
            table_dim_tuple = [('ctop_kuaishou_audience_report_daily_age_material', 'age_segment', 'age_'),
                               ('ctop_kuaishou_audience_report_daily_gender_material', 'gender', 'gender_')]
            segment_feature_df = pd.DataFrame()
            for table, dim_col, col_prefix in table_dim_tuple:
                tmp_df = get_segment_features(table, sig_lst, dim_col, project_id, col_prefix)
                segment_feature_df = pd.concat([segment_feature_df, tmp_df], axis=1)
            # 3-3 Merge the overall features with the per-dimension, per-segment features
            final_feature_df = pd.concat([sig_feature_df, segment_feature_df], axis=1)
            # Some materials have no data in the age or gender dimension, which leaves NaNs
            # after the concat; drop them so NearestNeighbors does not raise
            null_df = final_feature_df.isnull().sum(axis=1)
            exist_null_fea_sig_lst = null_df[null_df >= 1].index
            # Log the materials with missing dimensions
            if len(exist_null_fea_sig_lst) > 0:
                logger.info("The following materials are missing data in some dimensions: %s" % exist_null_fea_sig_lst)
                final_feature_df = final_feature_df[~final_feature_df.index.isin(exist_null_fea_sig_lst)]

            # 4. Find the nearest neighbours
            k = config['nearestNeighbors']['k']
            distance_threshold = config['nearestNeighbors']['distanceThreshold']
            X = final_feature_df.values
            nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X)
            distances, indices = nbrs.kneighbors(X)
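            # distances and indices both have shape (n_materials, k); because the
            # query set is the training set itself, column 0 of each row is
            # (barring exact duplicates) the material itself at distance 0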
            # 4-1 When the distance between two points exceeds the threshold, they are
            #     not treated as neighbours: entries where mask is True become NaN
            mask = distances > distance_threshold
            indices_mask_df = pd.DataFrame(indices).mask(mask)
            indices_mask_df = indices_mask_df.astype(dtype=pd.Int64Dtype())
            # 4-2 Replace the positional indices with the actual signatures
            sig_arr = final_feature_df.index.values
            indices_mask_df.index = sig_arr
            indices_mask_df = indices_mask_df.applymap(lambda x: sig_arr[x] if not pd.isnull(x) else x)
            res_lst = []
            for index, row in indices_mask_df.iterrows():
                others = [item for item in row if (not pd.isnull(item)) and (item != index)]
                res_lst.append([index] + others)
            res_df = pd.DataFrame(res_lst)
            res_df.columns = ['signature'] + ['similar_signature_' + str(i + 1) for i in range(res_df.shape[1] - 1)]
            # Pad to a fixed width of k - 1 similar-signature columns
            columns = ['signature'] + ['similar_signature_' + str(i + 1) for i in range(k - 1)]
            res_df = res_df.reindex(columns=columns)

            # 5. Write the results to the database
            res_df['stat_date'] = datetime.date.today().strftime('%Y-%m-%d')
            res_df['project_id'] = project_id
            res_df.to_sql(name="ctop_ai_kuaishou_signature_similar",
                          con=write_engine,
                          if_exists='append',
                          index=False)
            logger.info("Similarity computation finished for project_id = %s!" % project_id)
    except Exception:
        logger.error("Exception: %s" % traceback.format_exc())
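
# Usage note (assumed deployment layout): the script reads its config from the
# hard-coded path above, so it is run directly on the host, e.g.
#   LYY_DEV=dev python get_similar_material.py   # writes results to the dev DB
#   python get_similar_material.py               # writes results to the production DB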