import yaml
import os
import pandas as pd
import numpy as np
from utils.commonFunc import update_dict, get_db_engine
from functools import reduce
from itertools import product
from datetime import datetime
from statsmodels.stats.proportion import proportions_ztest


class BayesFeatures(object):
    """
    Compute the single-dimension feature values of a creative (signature).
    """
    def __init__(self, signature, target_type, dim, dim_config):
        self.signature = signature
        self.target_type = target_type
        self.dim = dim
        self.dim_config = dim_config
        self.table = self.dim_config['table']
        self.file_name = self.dim_config['fieldName']
        self.dim_lst = self.dim_config['Lst']
        self.window_size = self.dim_config['windowSize']
        self.window_combine = []
        self.bayes_feature = {}

    def get_window_combine(self):
        """
        Build the sliding-window combinations of the dimension values for the given window size.
        For example, age: ['18-23岁', '24-30岁', '31-40岁', ...] with size = 2
        yields ['18-23岁', '24-30岁'], ['24-30岁', '31-40岁'], ...
        """
        start = 0
        while (start < len(self.dim_lst)) and (start + self.window_size <= len(self.dim_lst)):
            self.window_combine.append(self.dim_lst[start: start + self.window_size])
            start += 1
        # Append the catch-all combination, equivalent to '不限' (no restriction)
        self.window_combine.append(['不限'])

    def get_bayes_feature(self):
        # 1. Fetch the audience data of the creative for this dimension (performance over roughly the last month)
        sql = '''
           select signature,
                  %s,
                  sum(aclick) aclick,
                  sum(bclick) bclick,
                  sum(activation) activation
           from %s
           where signature = '%s'
           -- and datediff(now(),stat_date)<=30
           group by signature, %s
           ''' % (self.file_name, self.table, self.signature, self.file_name)
        df = pd.read_sql(sql, engine)

        if self.target_type == 'action_ratio':
            # Combination feature values for the action ratio (bclick / aclick)
            df['unbclick'] = df['aclick'] - df['bclick']
            sig_pos_pct = df['bclick'].sum() / df['aclick'].sum()  # probability of click given impression
            sig_neg_pct = 1 - sig_pos_pct  # probability of no click given impression
            self.bayes_feature['sig_pos_pct'] = sig_pos_pct
            self.bayes_feature['sig_neg_pct'] = sig_neg_pct
            self.bayes_feature['sample_size'] = df['aclick'].sum()  # impressions, used later for the significance test
            for sub_combine in self.window_combine:
                if sub_combine != ['不限']:
                    key = self.dim + '_' + '|'.join(sub_combine)
                    if self.dim != 'city':
                        pos_pct = df[df[self.file_name].isin(sub_combine)].bclick.sum() / df.bclick.sum()
                        neg_pct = df[df[self.file_name].isin(sub_combine)].unbclick.sum() / df.unbclick.sum()
                    if self.dim == 'city':
                        # Map each city_level to its city_name values
                        city_lst = []
                        for city_level in sub_combine:
                            get_city_sql = """
                            select city_name from ctop_kuaishou_city_level where city_level = '%s'
                            """ % city_level
                            city_df = pd.read_sql(get_city_sql, engine)
                            city_lst.extend(city_df['city_name'].values)
                        pos_pct = df[df[self.file_name].isin(city_lst)].bclick.sum() / df.bclick.sum()
                        neg_pct = df[df[self.file_name].isin(city_lst)].unbclick.sum() / df.unbclick.sum()
                    self.bayes_feature[key] = {'pos_pct': pos_pct, 'neg_pct': neg_pct}
                else:
                    self.bayes_feature[self.dim + '_' + '不限'] = {'pos_pct': 1, 'neg_pct': 1}

        if self.target_type == 'convert_ratio':
            # Combination feature values for the conversion ratio (activation / bclick)
            df['unactivation'] = df['bclick'] - df['activation']
            sig_pos_pct = df['activation'].sum() / df['bclick'].sum()  # probability of activation given click
            sig_neg_pct = 1 - sig_pos_pct  # probability of no activation given click
            self.bayes_feature['sig_pos_pct'] = sig_pos_pct
            self.bayes_feature['sig_neg_pct'] = sig_neg_pct
            self.bayes_feature['sample_size'] = df['bclick'].sum()  # clicks, used later for the significance test
            for sub_combine in self.window_combine:
                if sub_combine != ['不限']:
                    key = self.dim + '_' + '|'.join(sub_combine)
                    if self.dim != 'city':
                        pos_pct = df[df[self.file_name].isin(sub_combine)].activation.sum() / df.activation.sum()
                        neg_pct = df[df[self.file_name].isin(sub_combine)].unactivation.sum() / df.unactivation.sum()
                    if self.dim == 'city':
                        # Map each city_level to its city_name values
                        city_lst = []
                        for city_level in sub_combine:
                            get_city_sql = """
                            select city_name from ctop_kuaishou_city_level where city_level = '%s'
                            """ % city_level
                            city_df = pd.read_sql(get_city_sql, engine)
                            city_lst.extend(city_df['city_name'].values)
                        pos_pct = df[df[self.file_name].isin(city_lst)].activation.sum() / df.activation.sum()
                        neg_pct = df[df[self.file_name].isin(city_lst)].unactivation.sum() / df.unactivation.sum()
                    self.bayes_feature[key] = {'pos_pct': pos_pct, 'neg_pct': neg_pct}
                else:
                    self.bayes_feature[self.dim + '_' + '不限'] = {'pos_pct': 1, 'neg_pct': 1}
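

# Illustrative note (the values here are placeholders, not taken from the real config): with
# dim = 'age', dim_lst = ['18-23岁', '24-30岁', '31-40岁'] and window_size = 2,
# get_window_combine produces [['18-23岁', '24-30岁'], ['24-30岁', '31-40岁'], ['不限']],
# and get_bayes_feature then stores one {'pos_pct', 'neg_pct'} pair per combination under
# keys such as 'age_18-23岁|24-30岁' and 'age_不限', alongside the creative-level
# 'sig_pos_pct', 'sig_neg_pct' and 'sample_size' entries consumed by BayesCombine below.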


class BayesCombine(object):
    """
    Combine the per-dimension feature values into multi-dimension targeting estimates.
    """
    def __init__(self, signature, project_id, target_type, dim_features):
        self.signature = signature
        self.project_id = project_id
        self.target_type = target_type
        self.dim_features = dim_features
        self.bayes_combine_df = pd.DataFrame()
        self.actual_prob = None
        self.sample_size = None

    def get_bayes_estimate(self):
        dim_combine_lst = [[key for key in fea.keys() if key not in ['sig_pos_pct', 'sig_neg_pct', 'sample_size']]
                           for fea in self.dim_features]
        self.dim_features = reduce(update_dict, self.dim_features)
        self.actual_prob = self.dim_features['sig_pos_pct']
        self.sample_size = self.dim_features['sample_size']
        for ele in product(*dim_combine_lst):
            pos_pct_lst = [self.dim_features[key]['pos_pct'] for key in ele]
            prob_pos = reduce(lambda x, y: x * y, pos_pct_lst)
            neg_pct_lst = [self.dim_features[key]['neg_pct'] for key in ele]
            prob_neg = reduce(lambda x, y: x * y, neg_pct_lst)
            prob = (prob_pos * self.dim_features['sig_pos_pct']) / (prob_neg * self.dim_features['sig_neg_pct'])
            out_dict = dict(zip([e.split('_')[0] for e in ele], [e.split('_')[-1] for e in ele]))
            # Test whether the estimated probability of this combination differs significantly
            # from the actual delivery probability (significantly better or worse)
            count = np.array([self.sample_size * prob, self.sample_size * self.actual_prob])
            nobs = np.array([self.sample_size, self.sample_size])
            z_score, p_value = proportions_ztest(count=count, nobs=nobs, value=None,
                                                 alternative='two-sided', prop_var=False)
            out_dict['z_score'] = z_score
            out_dict['p_value'] = p_value
            out_dict['sample_size'] = self.sample_size
            # TODO call the audience-coverage estimation API to get the crowd coverage of this combination
            out_dict['crowd_coverage_cnt'] = 1000
            out_dict['combine_estimate_prob'] = prob
            out_dict['actual_prob'] = self.actual_prob
            out_dict['signature'] = self.signature
            out_dict['project_id'] = self.project_id
            out_dict['target_type'] = self.target_type
            out_dict['stat_date'] = str(datetime.now().date())
            combine = pd.DataFrame([out_dict])
            # DataFrame.append was removed in recent pandas; concat keeps the same behaviour
            self.bayes_combine_df = pd.concat([self.bayes_combine_df, combine], ignore_index=True)

    def write_to_db(self):
        self.bayes_combine_df.to_sql(name="ctop_ai_kuaishou_signature_recommended_target_combine",
                                     con=engine,
                                     if_exists='append',
                                     index=False)
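

# How get_bayes_estimate combines the per-dimension features (a reading of the code above,
# noted here for reference): for each cross-dimension combination it applies a naive-Bayes
# style independence assumption and computes the posterior odds
#
#     prob = [ prod_k P(dim_k in combo | positive) * P(positive) ]
#            / [ prod_k P(dim_k in combo | negative) * P(negative) ]
#
# where pos_pct / neg_pct are the per-dimension conditional shares computed in BayesFeatures
# and sig_pos_pct / sig_neg_pct are the creative-level base rates. The estimate is then
# compared against the creative's actual rate with a two-proportion z-test
# (statsmodels proportions_ztest) to flag combinations that look significantly better or worse.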


if __name__ == '__main__':
    # 1. Load the configuration file
    with open('config/config.yaml', mode='r', encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)

    # 1.1 Database connection engine, switched between the dev and production environments
    if os.getenv('LYY_DEV', 'unknown') == 'dev':
        engine = get_db_engine(config['devDB'])
    else:
        engine = get_db_engine(config['productDB'])

    # 2. Dimensions that participate in the targeting combinations
    target_dim = [key for key in config['bayesDim'].keys() if config['bayesDim'][key]['isOn']]

    # 3. Compute the Bayes combinations and write them to the database
    for project_id in config['projectId']:
        # 3.1 Fetch the currently active creatives of the project, e.g. at least 50 accumulated activations
        #     over the last 3 days (this always reads from the production database, in both dev and production)
        product_engine = get_db_engine(config['productDB'])
        sql = '''
           select signature from ctop_kuaishou_report_daily_material
           where account_id in (select account_id from ctop_user_allocation where project_id = %s)
           and datediff(now(),stat_date) <= %s
           group by signature
           having sum(activation) >= %s
           ''' % (project_id,
                  config['activeMaterialFilterRule']['days'],
                  config['activeMaterialFilterRule']['activation'])
        df = pd.read_sql(sql, product_engine)
        active_signatures = df[~df.signature.isnull()].signature.values

        # 3.2 Keep only creatives with enough activations in the audience report,
        #     e.g. 100 accumulated activations over the last month
        # TODO once the production audience table is available, update the table name and add the days filter
        sql = """
        select signature from ctop_kuaishou_audience_daily_report_by_signature_age
        where signature in %s
        group by signature
        having sum(activation) >= %s
        """ % (tuple(active_signatures),
               config['getBaysCombineMaterialFilterRule']['activation'])
        df = pd.read_sql(sql, engine)
        signature_lst = df['signature'].values

        # TODO this hard-coded list is for testing only
        signature_lst = ['0070efb7557b2a04cf3d4a6f243c3cd8', '03b93728f0d82ea7865c3c7cf632bc1b']

        # 3.3 Compute the Bayes features of each selected creative per dimension
        for sig in signature_lst:
            for t_type in config['targetType']:
                bayes_feature_lst = []
                for dimension in target_dim:
                    cls = BayesFeatures(sig, t_type, dimension, config['bayesDim'][dimension])
                    # Build the sliding-window combinations
                    cls.get_window_combine()
                    cls.get_bayes_feature()
                    bayes_feature_lst.append(cls.bayes_feature)
                # Combine the per-dimension feature values into multi-dimension estimates and persist them
                cls = BayesCombine(sig, project_id, t_type, bayes_feature_lst)
                cls.get_bayes_estimate()
                cls.write_to_db()
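

# A minimal sketch of the config/config.yaml layout this script assumes (keys are taken from
# the code above; the concrete values are illustrative placeholders only):
#
#   devDB / productDB:                 connection settings passed to get_db_engine
#   projectId:                         e.g. [101, 102]
#   targetType:                        ['action_ratio', 'convert_ratio']
#   activeMaterialFilterRule:          e.g. {days: 3, activation: 50}
#   getBaysCombineMaterialFilterRule:  e.g. {activation: 100}
#   bayesDim:
#     age:
#       isOn: true
#       table: <audience report table for this dimension>
#       fieldName: age
#       Lst: ['18-23岁', '24-30岁', '31-40岁', ...]
#       windowSize: 2
#     city:
#       isOn: true
#       ...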