| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285 | import jsonimport yamlimport osimport pandas as pdimport numpy as npfrom utils.commonFunc import update_dict, get_db_engine, city_code_transform, age_code_transform, gender_code_transformfrom functools import reducefrom itertools import productfrom datetime import datetimefrom statsmodels.stats.proportion import proportions_ztestimport tracebackimport requestsfrom utils.BaseClass import NpEncoderfrom config.url import estimate_people_number_url, headersimport loggingfrom concurrent_log import ConcurrentTimedRotatingFileHandlerlog_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y/%m/%d %I:%M:%S %p')log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/ai_target/logs/BayesCombine.log", when="midnight", backupCount=100)log_handler.setFormatter(log_formatter)logger = logging.getLogger('bayes_combine_logger')logger.addHandler(log_handler)logger.setLevel(logging.DEBUG)print('id of bayes_combine_logger %s' % id(logger))class BayesFeatures(object):    """    计算素材单维度的特征值    """    def __init__(self, signature, target_type, dim, dim_config):        self.signature = signature        self.target_type = target_type        self.dim = dim        self.dim_config = dim_config        self.table = self.dim_config['table']        self.file_name = self.dim_config['fieldName']        self.dim_lst = self.dim_config['Lst']        self.window_size = self.dim_config['windowSize']        self.window_combine = []        self.bayes_feature = {}    def get_window_combine(self):        """        根据size得到指定维度的滑窗组合。        如 age: ['18-23岁', '24-30岁','31-40岁',....] 和 size = 2        得到 ['18-23岁', '24-30岁'],['24-30岁','31-40岁'],......        """        start = 0        while (start < len(self.dim_lst)) and (start + self.window_size <= len(self.dim_lst)):            self.window_combine.append(self.dim_lst[start: start + self.window_size])            start += 1        # 添加一个全部的组合,等同于'不限'        self.window_combine.append(['不限'])    def get_bayes_feature(self):        # 1、获取指定素材在指定维度下的人群数据(近一个月内的数据表现)        sql = '''           select signature,                   %s,                  sum(aclick) aclick,                  sum(bclick) bclick,                  sum(activation) activation           from %s           where signature = '%s'            and datediff(now(),stat_date)<= %s           group by signature,  %s           ''' % (self.file_name, self.table, self.signature, config['getBaysCombineMaterialFilterRule']['days'], self.file_name)        df = pd.read_sql(sql, product_engine)        if self.target_type == 'action_ratio':            # 计算行为率(bclick/aclick)的组合特征值            df['unbclick'] = df['aclick'] - df['bclick']            for sub_combine in self.window_combine:                if sub_combine != ['不限']:                    key = self.dim + '_' + '|'.join(sub_combine)                    if self.dim != 'city':                        pos_pct = df[df[self.file_name].isin(sub_combine)].bclick.sum() / df.bclick.sum()                        neg_pct = df[df[self.file_name].isin(sub_combine)].unbclick.sum() / df.unbclick.sum()                    if self.dim == 'city':                        # 获取 city_level 对应的 city_name                        city_lst = []                        for city_level in sub_combine:                            get_city_sql = """                            select city_name from ctop_kuaishou_city_level where city_level = '%s'                            """ % city_level                            city_df = pd.read_sql(get_city_sql, product_engine)                            city_lst.extend(city_df['city_name'].values)                        pos_pct = df[df[self.file_name].isin(city_lst)].bclick.sum() / df.bclick.sum()                        neg_pct = df[df[self.file_name].isin(city_lst)].unbclick.sum() / df.unbclick.sum()                    self.bayes_feature[key] = {'pos_pct': pos_pct, 'neg_pct': neg_pct}                else:                    self.bayes_feature[self.dim + '_' + '不限'] = {'pos_pct': 1, 'neg_pct': 1}        if self.target_type == 'convert_ratio':            # 计算转化率(activation/bclick)的组合特征值            df['unactivation'] = df['bclick'] - df['activation']            for sub_combine in self.window_combine:                if sub_combine != '不限':                    key = self.dim + '_' + '|'.join(sub_combine)                    if self.dim != 'city':                        pos_pct = df[df[self.file_name].isin(sub_combine)].activation.sum() / df.activation.sum()                        neg_pct = df[df[self.file_name].isin(sub_combine)].unactivation.sum() / df.unactivation.sum()                    if self.dim == 'city':                        # 获取 city_level 对应的 city_name                        city_lst = []                        for city_level in sub_combine:                            get_city_sql = """                            select city_name from ctop_kuaishou_city_level where city_level = '%s'                            """ % city_level                            city_df = pd.read_sql(get_city_sql, product_engine)                            city_lst.extend(city_df['city_name'].values)                        pos_pct = df[df[self.file_name].isin(city_lst)].activation.sum() / df.activation.sum()                        neg_pct = df[df[self.file_name].isin(city_lst)].unactivation.sum() / df.unactivation.sum()                    self.bayes_feature[key] = {'pos_pct': pos_pct, 'neg_pct': neg_pct}                else:                    self.bayes_feature[self.dim + '_' + '不限'] = {'pos_pct': 1, 'neg_pct': 1}class BayesCombine(object):    """    依据特征值,计算多维度的组合预估值    """    def __init__(self, signature, project_id, target_type, dim_features):        self.signature = signature        self.project_id = project_id        self.target_type = target_type        self.dim_features = dim_features        self.bayes_combine_df = pd.DataFrame()        self.actual_prob = None        self.sample_size = None        self.sig_pos_pct = None        self.sig_neg_pct = None    def get_bayes_estimate(self):        dim_combine_lst = [[key for key in fea.keys()] for fea in self.dim_features]        self.dim_features = reduce(update_dict, self.dim_features)        # 通过 年龄对应的表: ctop_kuaishou_audience_daily_report_by_signature_age 来获取样本量,和正负比例值(其他表的数据可能会丢失导致不全)        get_sample_and_pct_sql = """        select sum(aclick) aclick, sum(bclick) bclick, sum(activation) activation  from %s  where         signature = '%s'         and datediff(now(),stat_date)<= %s        """ % (config['bayesDim']['age']['table'], self.signature, config['getBaysCombineMaterialFilterRule']['days'])        sample_pct_df = pd.read_sql(get_sample_and_pct_sql, product_engine)        if self.target_type == 'action_ratio':            self.sig_pos_pct = sample_pct_df['bclick'].sum() / sample_pct_df['aclick'].sum()            self.sig_neg_pct = 1 - self.sig_pos_pct            self.actual_prob = self.sig_pos_pct            self.sample_size = int(sample_pct_df.aclick.sum())        elif self.target_type == 'convert_ratio':            self.sig_pos_pct = sample_pct_df['activation'].sum() / sample_pct_df['bclick'].sum()            self.sig_neg_pct = 1 - self.sig_pos_pct            self.actual_prob = self.sig_pos_pct            self.sample_size = int(sample_pct_df.bclick.sum())        for ele in product(*dim_combine_lst):            pos_pct_lst = [self.dim_features[key]['pos_pct'] for key in ele]            prob_pos = reduce(lambda x, y: x * y, pos_pct_lst)            neg_pct_lst = [self.dim_features[key]['neg_pct'] for key in ele]            prob_neg = reduce(lambda x, y: x * y, neg_pct_lst)            prob = (prob_pos * self.sig_pos_pct) / (prob_neg * self.sig_neg_pct)            out_dict = dict(zip([e.split('_')[0] for e in ele], [e.split('_')[-1] for e in ele]))            # 计算该组合的概率值 与 实际投放的概率值 是否存在显著性差异:显著好、 显著差            count = np.array([self.sample_size * prob, self.sample_size * self.actual_prob])            nobs = np.array([self.sample_size, self.sample_size])            z_score, p_value = proportions_ztest(count=count, nobs=nobs, value=None, alternative='two-sided', prop_var=False)            out_dict['z_score'] = z_score            out_dict['p_value'] = p_value            out_dict['sample_size'] = self.sample_size            try:                # 获取该项目下在投的账号                get_acc_sql = """select account_id from ctop_user_allocation where project_id = %s and account_status=0 limit 1""" % \                              self.project_id                account_df = pd.read_sql(get_acc_sql, product_engine)                account_id = account_df['account_id'].values[0]                request_data = {'region': city_code_transform(out_dict.get('city')),                                'ages_range': age_code_transform(out_dict.get('age')),                                'gender': gender_code_transform(out_dict.get('gender')),                                'advertiser_id': account_id}                request = requests.post(url=estimate_people_number_url,                                        headers=headers,                                        data=json.dumps(request_data, cls=NpEncoder))                response_data = json.loads(request.text)                if response_data['code'] == 0:                    out_dict['crowd_coverage_cnt'] = response_data['data'].get('audience_prediction_num')                else:                    logger.error("人群预估覆盖接口调用报错,请求数据为%s,返回数据为%s" % (str(request_data), str(response_data)))            except:                logger.error("人群预估覆盖接口调用报错,请求数据为%s,请求地址为%s, 返回数据为%s, 异常信息为%s" %                             (str(request_data), estimate_people_number_url, str(response_data), traceback.format_exc()))            out_dict['combine_estimate_prob'] = prob            out_dict['actual_prob'] = self.actual_prob            out_dict['signature'] = self.signature            out_dict['project_id'] = self.project_id            out_dict['target_type'] = self.target_type            out_dict['stat_date'] = str(datetime.now().date())            combine = pd.DataFrame([out_dict])            self.bayes_combine_df = self.bayes_combine_df.append(combine, ignore_index=True)    def write_to_db(self):        self.bayes_combine_df.to_sql(name="ctop_ai_kuaishou_signature_recommended_target_combine",                                     con=engine,                                     if_exists='append',                                     index=False)if __name__ == '__main__':    # 1、读取配置文件    with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:        config = yaml.load(f.read(), Loader=yaml.FullLoader)    # 1-1 数据库连接引擎,依据开发环境/生产环境 进行切换    if os.getenv('LYY_DEV', 'unknown') == 'dev':        engine = get_db_engine(config['devDB'])    else:        engine = get_db_engine(config['productDB'])    product_engine = get_db_engine(config['productDB'])    # 2、 参与定向组合的维度    target_dim = [key for key in config['bayesDim'].keys() if config['bayesDim'][key]['isOn']]    # 4、计算贝叶斯组合入库    for project_id in config['projectId']:        # 4.1 获取指定项目下当前活跃的素材信息, 如近3天内累计激活个数达到50个(开发环境或者生产环境,这部分都读取生产数据库)        sql = '''           select signature from ctop_kuaishou_report_daily_material            where account_id in (select account_id from ctop_user_allocation where project_id = %s)           and datediff(now(),stat_date) <= %s           group by signature           having sum(activation) >= %s           ''' % (project_id,                  config['activeMaterialFilterRule']['days'],                  config['activeMaterialFilterRule']['activation'])        df = pd.read_sql(sql, product_engine)        active_signatures = df[~df.signature.isnull()].signature.values        # 4.2 在素材人群报表筛选里面近一个月累计100个激活        sql = """        select signature from ctop_kuaishou_audience_report_daily_age_material          where signature in %s         and datediff(now(),stat_date) <= %s           group by signature           having sum(activation) >= %s        """ % (tuple(active_signatures),               config['getBaysCombineMaterialFilterRule']['days'],               config['getBaysCombineMaterialFilterRule']['activation'])        df = pd.read_sql(sql, product_engine)        signature_lst = df['signature'].values        # 这行代码用于测试        # signature_lst = ['4a56bc00ce51420f3a460ab51c6f5110']        # 4.2 计算指定素材的贝叶斯特征        for sig in signature_lst:            try:                for t_type in config['targetType']:                    bayes_feature_lst = []                    for dimension in target_dim:                        cls = BayesFeatures(sig, t_type, dimension, config['bayesDim'][dimension])                        # 计算滑窗组合                        cls.get_window_combine()                        cls.get_bayes_feature()                        bayes_feature_lst.append(cls.bayes_feature)                    # 依据特征值,计算多维度的组合预估值                    cls = BayesCombine(sig, project_id, t_type, bayes_feature_lst)                    cls.get_bayes_estimate()                    cls.write_to_db()                logger.info('project_id=%s, signature= %s 完成贝叶斯组合计算!' % (project_id, sig))            except:                logger.error('project_id=%s, signature=%s 贝叶斯组合计算出错,异常信息为%s!' % (project_id, sig, traceback.format_exc()))        logger.info('project_id is %s 完成贝叶斯组合计算!' % project_id)
 |