# historical_material.py

import os
import yaml
from common_func import get_db_engine
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
import numpy as np
import sys
import pickle
import logging
from concurrent_log import ConcurrentTimedRotatingFileHandler

log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y/%m/%d %I:%M:%S %p')
log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/similar_material/logs/historical_material.log",
                                                 when="midnight", backupCount=100)
log_handler.setFormatter(log_formatter)
logger = logging.getLogger('historical_material_logger')
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
logger.info("historical_material_logger started!")
print('id of historical_material_logger %s' % id(logger))


def scaler_feature(data, axis=0):
    """
    Standardize features with z = (x - u) / s.
    :param data: pd.DataFrame
    :param axis: 0 - standardize row-wise (across columns), 1 - standardize column-wise (across rows)
    :return: pd.DataFrame of standardized values, same index, columns suffixed with '_scaler'
    """
    scaler = StandardScaler()
    scaler_data = None
    if axis == 0:
        scaler_data = scaler.fit_transform(data.T).T
    if axis == 1:
        scaler_data = scaler.fit_transform(data)
    scaler_df = pd.DataFrame(scaler_data)
    scaler_df.index = data.index
    scaler_df.columns = [col + '_scaler' for col in data.columns]
    return scaler_df
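
# Illustrative sketch of the axis semantics (hypothetical values, not part of the pipeline):
# axis=0 standardizes each row across its columns (one z-score per material), while
# axis=1 standardizes each column across all rows (one z-score per feature). For example:
#   df = pd.DataFrame({'a_cost': [10.0, 30.0], 'b_cost': [20.0, 40.0]}, index=['sig1', 'sig2'])
#   scaler_feature(df, axis=0)   # row 'sig1' -> [-1.0, 1.0]; row 'sig2' -> [-1.0, 1.0]
#   scaler_feature(df, axis=1)   # column 'a_cost' -> [-1.0, 1.0]; column 'b_cost' -> [-1.0, 1.0]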


def get_segment_features(table, signature, dim_col, project_id, col_prefix):
    # 1 Fetch the raw data
    get_org_data_sql = """
    select signature, aclick, bclick, activation, charge, %s from %s
    where signature in %s and project_id = %s
    """ % (dim_col, table, tuple(signature), project_id)
    org_df = pd.read_sql(get_org_data_sql, read_engine)
    # 2 For each segment, compute the behavior rate, activation rate and activation cost, then standardize row-wise
    g = org_df.groupby(['signature', dim_col]).agg({'activation': sum, 'aclick': sum, 'bclick': sum, 'charge': sum}).unstack()
    # Segment values under this dimension, e.g. the gender dimension has: male, female, other ('其他').
    # Drop '其他' because its business meaning is unclear, so the model effect cannot be explained.
    segment_cols = list(g['charge'].columns)
    if '其他' in segment_cols:
        segment_cols.remove('其他')
    segment_fea_df = pd.DataFrame()
    for col in segment_cols:
        segment_fea_df[col + '_bclick_aclick_pct'] = g[('bclick', col)] / g[('aclick', col)]
        segment_fea_df[col + '_activation_bclick_pct'] = g[('activation', col)] / g[('bclick', col)]
        segment_fea_df[col + '_cost'] = g[('charge', col)] / g[('activation', col)]
    # 3 Each segment's share of the cumulative activations, then standardize row-wise
    for col in segment_cols:
        segment_fea_df[col + '_activation_pct'] = g[('activation', col)] / (g[[('activation', item) for item in segment_cols]].sum(axis=1))
    # 4 Handle np.nan and np.inf
    # 4-1 cost features
    # np.inf : numerator > 0 / denominator = 0, i.e. there is spend but no activations -> fill with 999
    # np.nan : numerator = 0 / denominator = 0, i.e. neither spend nor activations -> fill with -1
    cost_cols = [col for col in segment_fea_df.columns if 'cost' in col]
    segment_fea_df[cost_cols] = segment_fea_df[cost_cols].replace({np.nan: -1, np.inf: 999})
    # 4-2 ratio features
    # np.inf : numerator > 0 / denominator = 0, e.g. behavior rate with clicks but no impressions -> a data bug; drop the material and log it
    # np.nan : numerator = 0 / denominator = 0, e.g. activation rate with no activations and no clicks -> fill with 0
    pct_cols = [col for col in segment_fea_df.columns if 'pct' in col]
    segment_fea_df[pct_cols] = segment_fea_df[pct_cols].replace({np.nan: 0})
    inf_df = np.isinf(segment_fea_df[pct_cols]).sum(axis=1)
    remove_sig_lst = inf_df[inf_df >= 1].index
    # TODO add logging to record the materials with broken data
    if len(remove_sig_lst):
        print(remove_sig_lst)
    segment_fea_df = segment_fea_df[~segment_fea_df.index.isin(remove_sig_lst)]
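    # Illustrative sketch of the fill rules above (hypothetical numbers, not part of the pipeline):
    #   charge=50, activation=0  -> cost = 50/0 = np.inf -> 999   (spend without activations)
    #   charge=0,  activation=0  -> cost = 0/0  = np.nan -> -1    (no spend, no activations)
    #   activation=0, bclick=0   -> activation_bclick_pct = np.nan -> 0
    #   bclick=5,  aclick=0      -> bclick_aclick_pct = np.inf    -> the material is dropped (and should be logged)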
    # 5 Standardize the features row-wise
    segment_fea_scaler_df = pd.DataFrame()
    for col in ['bclick_aclick_pct', 'activation_bclick_pct', 'cost', 'activation_pct']:
        tmp_df = segment_fea_df.filter(like=col)
        scaler_df = scaler_feature(tmp_df, axis=0)
        segment_fea_scaler_df = pd.concat([segment_fea_scaler_df, scaler_df], axis=1)
    # 6 col_prefix and index
    segment_fea_scaler_df.columns = [col_prefix + col for col in segment_fea_scaler_df.columns]
    # Take the index from the filtered frame, not from g: once materials with broken data
    # have been dropped, the length of g.index no longer matches.
    segment_fea_scaler_df.index = segment_fea_df.index
    return segment_fea_scaler_df


def get_signature_features(signature, project_id):
    """
    Compute each material's overall behavior rate, activation rate and activation cost.
    :param project_id:
    :param signature:
    :return:
    """
    get_data_sql = """ select signature, sum(aclick) aclick, sum(bclick) bclick, sum(activation) activation, sum(charge) charge
    from ctop_kuaishou_audience_report_daily_age_material
    where signature in %s and project_id = %s
    group by signature
    """ % (tuple(signature), project_id)
    sig_org_df = pd.read_sql(get_data_sql, read_engine)
    sig_fea_df = pd.DataFrame()
    sig_fea_df['cost'] = sig_org_df['charge'] / sig_org_df['activation']
    sig_fea_df['bclick_aclick_pct'] = sig_org_df['bclick'] / sig_org_df['aclick']
    sig_fea_df['activation_bclick_pct'] = sig_org_df['activation'] / sig_org_df['bclick']
    # Standardize the features column-wise
    sig_fea_df.index = sig_org_df['signature'].values
    sig_fea_scaler_df = scaler_feature(sig_fea_df, axis=1)
    return sig_fea_scaler_df


if __name__ == '__main__':
    # 1 Read the config file
    with open('config/config.yaml', mode='r', encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)
    # 1-1 Database engines, switched between the dev and production environments
    if os.getenv('LYY_DEV', 'unknown') == 'dev':
        write_engine = get_db_engine(config['devDB'])
    else:
        write_engine = get_db_engine(config['productDB'])
    read_engine = get_db_engine(config['productDB'])
    # 2 Get all historical materials that reach the activation threshold
    project_id = 458
    get_sig_sql = """select signature, sum(activation) activation
    from ctop_kuaishou_audience_report_daily_age_material
    where project_id = %s
    group by signature
    having sum(activation) >= 100
    """ % project_id
    sig_df = pd.read_sql(get_sig_sql, read_engine)
    sig_df = sig_df[~sig_df.signature.isnull()]
    sig_lst = list(sig_df.signature.values)
    print('sig_lst:', len(sig_lst))
    # TODO for test
    # sig_lst = ['cefd07cf7951ba9674d43c1d9c112f37',
    #            '6f11859acaf4f1dd501706f8d50995d8',
    #            'bc8fe6ae97db794acaf050acff4386b9',
    #            '59a34e41bfcf321a931a0b204a8609c9',
    #            'c67604889af726d49401f40283865bb0',
    #            '75237de4427a9ff6da994976bf516f78',
    #            '0ab2a8816b1b34638767862123fc74a6',
    #            '177a0f359eb90623b7ecaf1c8d0b9db4',
    #            'd3c9bbd98d07afb7be9fe8b1b40133d3']
    # Exit when there are no qualifying materials (sig_lst is always a list, so test for emptiness)
    if not sig_lst:
        # TODO add logger
        sys.exit(0)
    if len(sig_lst) == 1:
        # TODO add logger
        sys.exit(1)
    # 3 Build the features
    # 3-1 Overall features of each material: behavior rate, activation rate, activation cost
    sig_feature_df = get_signature_features(sig_lst, project_id)
    # 3-2 For each dimension (age, gender) and each segment (male, female, etc.): behavior rate,
    #     activation rate, activation cost and activation share, standardized row-wise
    # get_segment_features(table, signature, dim_col, project_id, col_prefix)
    table_dim_tuple = [('ctop_kuaishou_audience_report_daily_age_material', 'age_segment', 'age_'),
                       ('ctop_kuaishou_audience_report_daily_gender_material', 'gender', 'gender_')]
    segment_feature_df = pd.DataFrame()
    for table, dim_col, col_prefix in table_dim_tuple:
        tmp_df = get_segment_features(table, sig_lst, dim_col, project_id, col_prefix)
        segment_feature_df = pd.concat([segment_feature_df, tmp_df], axis=1)
    # 3-3 Merge the overall features with the per-dimension/per-segment features into the final feature DataFrame
    final_feature_df = pd.concat([sig_feature_df, segment_feature_df], axis=1)
    # Some materials have no data on the age or gender dimension, so the feature concat produces nulls.
    # Drop those materials to keep NearestNeighbors from raising on them (see the sketch below).
    null_df = final_feature_df.isnull().sum(axis=1)
    exist_null_fea_sig_lst = null_df[null_df >= 1].index
    # TODO add logging to record the materials with missing data
    if len(exist_null_fea_sig_lst) > 0:
        print(exist_null_fea_sig_lst)
    final_feature_df = final_feature_df[~final_feature_df.index.isin(exist_null_fea_sig_lst)]
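    # Illustrative sketch of why the nulls appear (hypothetical signatures, not part of the pipeline):
    # pd.concat([...], axis=1) aligns on the union of the indexes, e.g.
    #   sig_feature_df      has rows ['sig_a', 'sig_b']
    #   segment_feature_df  has rows ['sig_a']           (no age/gender rows for 'sig_b')
    # so final_feature_df gets a 'sig_b' row whose segment columns are all NaN,
    # and NearestNeighbors would fail on those NaN values unless the row is dropped.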
    with open('final_feature_df.pkl', 'wb') as f:
        pickle.dump(final_feature_df, f)
    # 4 Finding the nearest neighbors
    k = config['nearestNeighbors']['k']
    distance_threshold = config['nearestNeighbors']['distanceThreshold']
    X = final_feature_df.values
    nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X)
    distances, indices = nbrs.kneighbors(X)
    with open('distances.pkl', 'wb') as f:
        pickle.dump(distances, f)
    with open('indices.pkl', 'wb') as f:
        pickle.dump(indices, f)
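    # Note on the kneighbors output (per the scikit-learn API): distances and indices are both
    # (n_samples, k) arrays; row i lists the k rows of X nearest to sample i in increasing distance
    # order, and since the query set equals the training set, each sample typically appears as its
    # own first neighbor at distance 0 (duplicate points aside).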
    # 4-1 When the distance between two points exceeds the threshold, do not treat them as neighboring materials
    # Elements where mask is True are replaced with NaN
    mask = distances > distance_threshold
    indices_mask_df = pd.DataFrame(indices).mask(mask)
    indices_mask_df = indices_mask_df.astype(dtype=pd.Int64Dtype())
    # 4-2 Replace the positional indices with the actual signatures
    sig_lst = final_feature_df.index.values
    indices_mask_df.index = sig_lst
    indices_mask_df = indices_mask_df.applymap(lambda x: sig_lst[x] if not pd.isnull(x) else x)
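    # Illustrative sketch of the masking + lookup above (hypothetical values, not part of the pipeline):
    #   distances row = [0.0, 1.2, 3.5], threshold = 2.0  ->  mask row = [False, False, True]
    #   indices   row = [0,   7,   42 ]                   ->  masked row = [0, 7, <NA>]
    #   after the applymap lookup the row becomes [sig_lst[0], sig_lst[7], <NA>],
    #   i.e. only neighbors within the distance threshold keep a signature.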
    res_lst = []
    for index, row in indices_mask_df.iterrows():
        others = [item for item in row if (not pd.isnull(item)) and (item != index)]
        res_lst.append([index] + others)
    res_df = pd.DataFrame(res_lst)
    res_df.columns = ['self'] + ['N' + str(i + 1) for i in range(res_df.shape[1] - 1)]
    columns = ['self'] + ['N' + str(i + 1) for i in range(k)]
    res_df = res_df.reindex(columns=columns)
    with open('res_df.pkl', 'wb') as f:
        pickle.dump(res_df, f)
    # 5 TODO write the results to the database
    pass