
优质定向V0.2 (quality targeting, v0.2)

liyuyi@c-top.com.cn · 3 years ago
Current commit: 319929f67c
9 files changed, 436 insertions(+) and 555 deletions(-)
  1. .idea/ai_target.iml (+1, -1)
  2. BayesCombine.py (+202, -212)
  3. ai_callback_handler.py (+11, -14)
  4. ai_target_combine_handler.py (+157, -239)
  5. ai_target_server.py (+0, -2)
  6. config/config.yaml (+19, -20)
  7. config/url.py (+12, -2)
  8. time_task_create_ad_by_target.py (+32, -63)
  9. utils/commonFunc.py (+2, -2)

+ 1 - 1
.idea/ai_target.iml

@@ -4,7 +4,7 @@
     <content url="file://$MODULE_DIR$">
       <sourceFolder url="file://$MODULE_DIR$" isTestSource="false" />
     </content>
-    <orderEntry type="jdk" jdkName="Remote Python 3.8.10 (sftp://root@139.186.165.84:22/data/Miniconda3/envs/ai_target/bin/python)" jdkType="Python SDK" />
+    <orderEntry type="jdk" jdkName="ai_target_test_env" jdkType="Python SDK" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
   <component name="PyDocumentationSettings">

+ 202 - 212
BayesCombine.py

@@ -1,21 +1,19 @@
+import datetime
 import json
-import yaml
-import os
-import pandas as pd
-import numpy as np
-from utils.commonFunc import update_dict, get_db_engine, city_code_transform, age_code_transform, gender_code_transform
-from functools import reduce
-from itertools import product
-from datetime import datetime
-from statsmodels.stats.proportion import proportions_ztest
+import logging
 import traceback
-import requests
-from utils.BaseClass import NpEncoder
-from config.url import estimate_people_number_url, headers
+import uuid
+from itertools import product
 
-import logging
+import pandas as pd
+import requests
+import yaml
 from concurrent_log import ConcurrentTimedRotatingFileHandler
 
+from config.url_and_db import estimate_people_number_url, headers, jeecg_db, jeecg_product_db, application_product_db
+from utils.BaseClass import NpEncoder
+from utils.commonFunc import get_db_engine, age_code_transform, gender_code_transform
+
 log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y/%m/%d %I:%M:%S %p')
 log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/ai_target/logs/BayesCombine.log", when="midnight", backupCount=100)
 log_handler.setFormatter(log_formatter)
@@ -25,22 +23,23 @@ logger.setLevel(logging.DEBUG)
 print('id of bayes_combine_logger %s' % id(logger))
 
 
-class BayesFeatures(object):
+class GetSingleDimFeatures(object):
     """
-    计算素材单维度的特征值
+    计算素材单维度滑窗组合转化占比情况
     """
 
-    def __init__(self, signature, target_type, dim, dim_config):
+    def __init__(self, signature, dim, dim_config):
         self.signature = signature
-        self.target_type = target_type
         self.dim = dim
         self.dim_config = dim_config
         self.table = self.dim_config['table']
-        self.file_name = self.dim_config['fieldName']
-        self.dim_lst = self.dim_config['Lst']
+        self.filed_name = self.dim_config['fieldName']
+        self.dim_value_lst = self.dim_config['Lst']
+        self.ratio_threshold = self.dim_config['ratio_threshold']
+        self.ratio_diff_threshold = self.dim_config['ratio_diff_threshold']
         self.window_size = self.dim_config['windowSize']
         self.window_combine = []
-        self.bayes_feature = {}
+        self.features = {}
 
     def get_window_combine(self):
         """
@@ -49,170 +48,181 @@ class BayesFeatures(object):
         得到 ['18-23岁', '24-30岁'],['24-30岁','31-40岁'],......
         """
         start = 0
-        while (start < len(self.dim_lst)) and (start + self.window_size <= len(self.dim_lst)):
-            self.window_combine.append(self.dim_lst[start: start + self.window_size])
+        while (start < len(self.dim_value_lst)) and (start + self.window_size <= len(self.dim_value_lst)):
+            self.window_combine.append(self.dim_value_lst[start: start + self.window_size])
             start += 1
 
         # 添加一个全部的组合,等同于'不限'
         self.window_combine.append(['不限'])
 
-    def get_bayes_feature(self):
-        # 1、获取指定素材在指定维度下的人群数据(近一个月内的数据表现)
-        sql = '''
-           select signature, 
-                  %s,
-                  sum(aclick) aclick,
-                  sum(bclick) bclick,
-                  sum(activation) activation
-           from %s
-           where signature = '%s' 
-           and datediff(now(),stat_date)<= %s
-           group by signature,  %s
-           ''' % (self.file_name, self.table, self.signature, config['getBaysCombineMaterialFilterRule']['days'], self.file_name)
-        df = pd.read_sql(sql, product_engine)
-        if self.target_type == 'action_ratio':
-            # 计算行为率(bclick/aclick)的组合特征值
-            df['unbclick'] = df['aclick'] - df['bclick']
-            for sub_combine in self.window_combine:
-                if sub_combine != ['不限']:
-                    key = self.dim + '_' + '|'.join(sub_combine)
-                    if self.dim != 'city':
-                        pos_pct = df[df[self.file_name].isin(sub_combine)].bclick.sum() / df.bclick.sum()
-                        neg_pct = df[df[self.file_name].isin(sub_combine)].unbclick.sum() / df.unbclick.sum()
-                    if self.dim == 'city':
-                        # 获取 city_level 对应的 city_name
-                        city_lst = []
-                        for city_level in sub_combine:
-                            get_city_sql = """
-                            select city_name from ctop_kuaishou_city_level where city_level = '%s'
-                            """ % city_level
-                            city_df = pd.read_sql(get_city_sql, product_engine)
-                            city_lst.extend(city_df['city_name'].values)
-                        pos_pct = df[df[self.file_name].isin(city_lst)].bclick.sum() / df.bclick.sum()
-                        neg_pct = df[df[self.file_name].isin(city_lst)].unbclick.sum() / df.unbclick.sum()
-                    self.bayes_feature[key] = {'pos_pct': pos_pct, 'neg_pct': neg_pct}
-                else:
-                    self.bayes_feature[self.dim + '_' + '不限'] = {'pos_pct': 1, 'neg_pct': 1}
-
-        if self.target_type == 'convert_ratio':
-            # 计算转化率(activation/bclick)的组合特征值
-            df['unactivation'] = df['bclick'] - df['activation']
-            for sub_combine in self.window_combine:
-                if sub_combine != '不限':
-                    key = self.dim + '_' + '|'.join(sub_combine)
-                    if self.dim != 'city':
-                        pos_pct = df[df[self.file_name].isin(sub_combine)].activation.sum() / df.activation.sum()
-                        neg_pct = df[df[self.file_name].isin(sub_combine)].unactivation.sum() / df.unactivation.sum()
-                    if self.dim == 'city':
-                        # 获取 city_level 对应的 city_name
-                        city_lst = []
-                        for city_level in sub_combine:
-                            get_city_sql = """
-                            select city_name from ctop_kuaishou_city_level where city_level = '%s'
-                            """ % city_level
-                            city_df = pd.read_sql(get_city_sql, product_engine)
-                            city_lst.extend(city_df['city_name'].values)
-                        pos_pct = df[df[self.file_name].isin(city_lst)].activation.sum() / df.activation.sum()
-                        neg_pct = df[df[self.file_name].isin(city_lst)].unactivation.sum() / df.unactivation.sum()
-                    self.bayes_feature[key] = {'pos_pct': pos_pct, 'neg_pct': neg_pct}
-                else:
-                    self.bayes_feature[self.dim + '_' + '不限'] = {'pos_pct': 1, 'neg_pct': 1}
-
-
-class BayesCombine(object):
+    def get_features(self):
+        # 1、获取素材在指定维度下的人群数据
+        get_audience_sql = f"select signature, {self.filed_name}, sum(activation) activation  " \
+                           f"from {self.table} where signature = '{self.signature}' group by signature, {self.filed_name}"
+        audience_df = pd.read_sql(get_audience_sql, jeecg_product_db)
+        for combine in self.window_combine:
+            if combine != ['不限']:
+                self.features['|'.join(combine)] = round(audience_df[audience_df[self.filed_name].isin(combine)].activation.sum() / \
+                                                         audience_df.activation.sum(), 4)
+            else:
+                if self.dim == 'gender':
+                    # 如果男/女 激活占比差异小于0.1, 说明素材的受众没有明显的性别倾向,可以对性别通投
+                    male_ratio = audience_df[audience_df['gender'] == '男'].activation.sum() / audience_df.activation.sum()
+                    female_ratio = audience_df[audience_df['gender'] == '女'].activation.sum() / audience_df.activation.sum()
+                    if abs(male_ratio - female_ratio) <= self.ratio_diff_threshold:
+                        self.features['不限'] = 1
+                if self.dim == 'age':
+                    self.features['不限'] = 1
+
+        # 过滤得到占比达到 ratio_threshold 的组合值
+        self.features = {f"{self.dim}_{combine}": ratio for combine, ratio in self.features.items() if ratio > self.ratio_threshold}
+
+
+class GetCompositeDimFeatures(object):
     """
-    依据特征值,计算多维度的组合预估值
+
     """
 
-    def __init__(self, signature, project_id, target_type, dim_features):
+    def __init__(self, signature, feature_lst, project_id):
         self.signature = signature
+        self.feature_lst = feature_lst
         self.project_id = project_id
-        self.target_type = target_type
-        self.dim_features = dim_features
-        self.bayes_combine_df = pd.DataFrame()
-        self.actual_prob = None
-        self.sample_size = None
-        self.sig_pos_pct = None
-        self.sig_neg_pct = None
-
-    def get_bayes_estimate(self):
-        dim_combine_lst = [[key for key in fea.keys()] for fea in self.dim_features]
-        self.dim_features = reduce(update_dict, self.dim_features)
-
-        # 通过 年龄对应的表: ctop_kuaishou_audience_daily_report_by_signature_age 来获取样本量,和正负比例值(其他表的数据可能会丢失导致不全)
-        get_sample_and_pct_sql = """
-        select sum(aclick) aclick, sum(bclick) bclick, sum(activation) activation  from %s  where
-         signature = '%s' 
-        and datediff(now(),stat_date)<= %s
-        """ % (config['bayesDim']['age']['table'], self.signature, config['getBaysCombineMaterialFilterRule']['days'])
-        sample_pct_df = pd.read_sql(get_sample_and_pct_sql, product_engine)
-        if self.target_type == 'action_ratio':
-            self.sig_pos_pct = sample_pct_df['bclick'].sum() / sample_pct_df['aclick'].sum()
-            self.sig_neg_pct = 1 - self.sig_pos_pct
-            self.actual_prob = self.sig_pos_pct
-            self.sample_size = int(sample_pct_df.aclick.sum())
-        elif self.target_type == 'convert_ratio':
-            self.sig_pos_pct = sample_pct_df['activation'].sum() / sample_pct_df['bclick'].sum()
-            self.sig_neg_pct = 1 - self.sig_pos_pct
-            self.actual_prob = self.sig_pos_pct
-            self.sample_size = int(sample_pct_df.bclick.sum())
 
-        for ele in product(*dim_combine_lst):
-            pos_pct_lst = [self.dim_features[key]['pos_pct'] for key in ele]
-            prob_pos = reduce(lambda x, y: x * y, pos_pct_lst)
-
-            neg_pct_lst = [self.dim_features[key]['neg_pct'] for key in ele]
-            prob_neg = reduce(lambda x, y: x * y, neg_pct_lst)
-
-            prob = (prob_pos * self.sig_pos_pct) / (prob_neg * self.sig_neg_pct)
-            out_dict = dict(zip([e.split('_')[0] for e in ele], [e.split('_')[-1] for e in ele]))
-
-            # 计算该组合的概率值 与 实际投放的概率值 是否存在显著性差异:显著好、 显著差
-            count = np.array([self.sample_size * prob, self.sample_size * self.actual_prob])
-            nobs = np.array([self.sample_size, self.sample_size])
-            z_score, p_value = proportions_ztest(count=count, nobs=nobs, value=None, alternative='two-sided', prop_var=False)
-            out_dict['z_score'] = z_score
-            out_dict['p_value'] = p_value
-            out_dict['sample_size'] = self.sample_size
+    def get_composite_features(self):
+        feature_combine_lst = [list(item.keys()) for item in self.feature_lst]
 
+        fea_combine_df = pd.DataFrame()
+        for ele in product(*feature_combine_lst):
+            fea_combine = dict(zip([e.split('_')[0] for e in ele], [e.split('_')[-1] for e in ele]))
+            fea_combine['unlimited_cnt'] = list(fea_combine.values()).count('不限')
+            fea_combine['features'] = str([(e.split('_')[0], e.split('_')[1], item[e]) for e in ele
+                                           for item in self.feature_lst if e in item.keys()])
+            # TODO 调用人群覆盖接口
             try:
-                # 获取该项目下在投的账号
-                get_acc_sql = """select account_id from ctop_user_allocation where project_id = %s and account_status=0 limit 1""" % \
-                              self.project_id
-                account_df = pd.read_sql(get_acc_sql, product_engine)
-                account_id = account_df['account_id'].values[0]
-                request_data = {'region': city_code_transform(out_dict.get('city')),
-                                'ages_range': age_code_transform(out_dict.get('age')),
-                                'gender': gender_code_transform(out_dict.get('gender')),
+                get_open_account_sql = f"select account_id from ctop_user_allocation where project_id = {self.project_id} and " \
+                                       f"account_status = 0 limit 1"
+                account_df = pd.read_sql(get_open_account_sql, jeecg_product_db)
+                account_id = account_df.account_id.values[0]
+                # TODO 测试用, 上线前需要注释掉
+                account_id = 9774238
+                request_data = {'ages_range': age_code_transform(fea_combine.get('age')),
+                                'gender': gender_code_transform(fea_combine.get('gender')),
                                 'advertiser_id': account_id}
-
                 request = requests.post(url=estimate_people_number_url,
                                         headers=headers,
                                         data=json.dumps(request_data, cls=NpEncoder))
                 response_data = json.loads(request.text)
                 if response_data['code'] == 0:
-                    out_dict['crowd_coverage_cnt'] = response_data['data'].get('audience_prediction_num')
-                else:
-                    logger.error("人群预估覆盖接口调用报错,请求数据为%s,返回数据为%s" % (str(request_data), str(response_data)))
+                    fea_combine['crowd_coverage_cnt'] = response_data['data'].get('audience_prediction_num')
             except:
-                logger.error("人群预估覆盖接口调用报错,请求数据为%s,请求地址为%s, 返回数据为%s, 异常信息为%s" %
-                             (str(request_data), estimate_people_number_url, str(response_data), traceback.format_exc()))
-
-            out_dict['combine_estimate_prob'] = prob
-            out_dict['actual_prob'] = self.actual_prob
-            out_dict['signature'] = self.signature
-            out_dict['project_id'] = self.project_id
-            out_dict['target_type'] = self.target_type
-            out_dict['stat_date'] = str(datetime.now().date())
-
-            combine = pd.DataFrame([out_dict])
-            self.bayes_combine_df = self.bayes_combine_df.append(combine, ignore_index=True)
-
-    def write_to_db(self):
-        self.bayes_combine_df.to_sql(name="ctop_ai_kuaishou_signature_recommended_target_combine",
-                                     con=engine,
-                                     if_exists='append',
-                                     index=False)
+                logger.error(f"人群预估覆盖接口调用报错,请求数据: {request_data}, 返回数据: {request.text}, 异常信息: {traceback.format_exc()}")
+
+            fea_combine_df = fea_combine_df.append(pd.DataFrame([fea_combine]), ignore_index=True)
+
+        # 计算结果写入数据库中
+        fea_combine_df['signature'] = self.signature
+        fea_combine_df['stat_date'] = str(datetime.datetime.now().date())
+        fea_combine_df['project_id'] = self.project_id
+        fea_combine_df.to_sql(name="ctop_ai_kuaishou_signature_recommended_target_combine_v2",
+                              con=jeecg_db,
+                              if_exists='append',
+                              index=False)
+
+
+# class BayesCombine(object):
+#     """
+#     依据特征值,计算多维度的组合预估值
+#     """
+#
+#     def __init__(self, signature, project_id, target_type, dim_features):
+#         self.signature = signature
+#         self.project_id = project_id
+#         self.target_type = target_type
+#         self.dim_features = dim_features
+#         self.bayes_combine_df = pd.DataFrame()
+#         self.actual_prob = None
+#         self.sample_size = None
+#         self.sig_pos_pct = None
+#         self.sig_neg_pct = None
+#
+#     def get_bayes_estimate(self):
+#         dim_combine_lst = [[key for key in fea.keys()] for fea in self.dim_features]
+#         self.dim_features = reduce(update_dict, self.dim_features)
+#
+#         # 通过 年龄对应的表: ctop_kuaishou_audience_daily_report_by_signature_age 来获取样本量,和正负比例值(其他表的数据可能会丢失导致不全)
+#         get_sample_and_pct_sql = """
+#         select sum(aclick) aclick, sum(bclick) bclick, sum(activation) activation  from %s  where
+#          signature = '%s'
+#         """ % (config['bayesDim']['age']['table'], self.signature)
+#         sample_pct_df = pd.read_sql(get_sample_and_pct_sql, product_engine)
+#         if self.target_type == 'action_ratio':
+#             self.sig_pos_pct = sample_pct_df['bclick'].sum() / sample_pct_df['aclick'].sum()
+#             self.sig_neg_pct = 1 - self.sig_pos_pct
+#             self.actual_prob = self.sig_pos_pct
+#             self.sample_size = int(sample_pct_df.aclick.sum())
+#         elif self.target_type == 'convert_ratio':
+#             self.sig_pos_pct = sample_pct_df['activation'].sum() / sample_pct_df['bclick'].sum()
+#             self.sig_neg_pct = 1 - self.sig_pos_pct
+#             self.actual_prob = self.sig_pos_pct
+#             self.sample_size = int(sample_pct_df.bclick.sum())
+#
+#         for ele in product(*dim_combine_lst):
+#             pos_pct_lst = [self.dim_features[key]['pos_pct'] for key in ele]
+#             prob_pos = reduce(lambda x, y: x * y, pos_pct_lst)
+#
+#             neg_pct_lst = [self.dim_features[key]['neg_pct'] for key in ele]
+#             prob_neg = reduce(lambda x, y: x * y, neg_pct_lst)
+#
+#             prob = (prob_pos * self.sig_pos_pct) / (prob_neg * self.sig_neg_pct)
+#             out_dict = dict(zip([e.split('_')[0] for e in ele], [e.split('_')[-1] for e in ele]))
+#
+#             # 计算该组合的概率值 与 实际投放的概率值 是否存在显著性差异:显著好、 显著差
+#             count = np.array([self.sample_size * prob, self.sample_size * self.actual_prob])
+#             nobs = np.array([self.sample_size, self.sample_size])
+#             z_score, p_value = proportions_ztest(count=count, nobs=nobs, value=None, alternative='two-sided', prop_var=False)
+#             out_dict['z_score'] = z_score
+#             out_dict['p_value'] = p_value
+#             out_dict['sample_size'] = self.sample_size
+#
+#             try:
+#                 # 获取该项目下在投的账号
+#                 get_acc_sql = """select account_id from ctop_user_allocation where project_id = %s and account_status=0 limit 1""" % \
+#                               self.project_id
+#                 account_df = pd.read_sql(get_acc_sql, product_engine)
+#                 account_id = account_df['account_id'].values[0]
+#                 account_id = 9774238
+#                 request_data = {'region': city_code_transform(out_dict.get('city')),
+#                                 'ages_range': age_code_transform(out_dict.get('age')),
+#                                 'gender': gender_code_transform(out_dict.get('gender')),
+#                                 'advertiser_id': account_id}
+#
+#                 request = requests.post(url=estimate_people_number_url,
+#                                         headers=headers,
+#                                         data=json.dumps(request_data, cls=NpEncoder))
+#                 response_data = json.loads(request.text)
+#                 if response_data['code'] == 0:
+#                     out_dict['crowd_coverage_cnt'] = response_data['data'].get('audience_prediction_num')
+#                 else:
+#                     logger.error("人群预估覆盖接口调用报错,请求数据为%s,返回数据为%s" % (str(request_data), str(response_data)))
+#             except:
+#                 logger.error("人群预估覆盖接口调用报错,请求数据为%s,请求地址为%s, 返回数据为%s, 异常信息为%s" %
+#                              (str(request_data), estimate_people_number_url, str(response_data), traceback.format_exc()))
+#
+#             out_dict['combine_estimate_prob'] = prob
+#             out_dict['actual_prob'] = self.actual_prob
+#             out_dict['signature'] = self.signature
+#             out_dict['project_id'] = self.project_id
+#             out_dict['target_type'] = self.target_type
+#             out_dict['stat_date'] = str(datetime.now().date())
+#
+#             combine = pd.DataFrame([out_dict])
+#             self.bayes_combine_df = self.bayes_combine_df.append(combine, ignore_index=True)
+#
+#     def write_to_db(self):
+#         self.bayes_combine_df.to_sql(name="ctop_ai_kuaishou_signature_recommended_target_combine",
+#                                      con=engine,
+#                                      if_exists='append',
+#                                      index=False)
 
 
 if __name__ == '__main__':
@@ -220,66 +230,46 @@ if __name__ == '__main__':
     with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:
         config = yaml.load(f.read(), Loader=yaml.FullLoader)
 
-    # 1-1 数据库连接引擎,依据开发环境/生产环境 进行切换
-    if os.getenv('LYY_DEV', 'unknown') == 'dev':
-        engine = get_db_engine(config['devDB'])
-    else:
-        engine = get_db_engine(config['productDB'])
-
-    product_engine = get_db_engine(config['productDB'])
-
     # 2、 参与定向组合的维度
     target_dim = [key for key in config['bayesDim'].keys() if config['bayesDim'][key]['isOn']]
 
     # 4、计算贝叶斯组合入库
     for project_id in config['projectId']:
-        # 4.1 获取指定项目下当前活跃的素材信息, 如近3天内累计激活个数达到50个(开发环境或者生产环境,这部分都读取生产数据库)
-        sql = '''
-           select signature from ctop_kuaishou_report_daily_material 
-           where account_id in (select account_id from ctop_user_allocation where project_id = %s)
-           and datediff(now(),stat_date) <= %s
-           group by signature
-           having sum(activation) >= %s
-           ''' % (project_id,
-                  config['activeMaterialFilterRule']['days'],
-                  config['activeMaterialFilterRule']['activation'])
-        df = pd.read_sql(sql, product_engine)
-        active_signatures = df[~df.signature.isnull()].signature.values
-
-        # 4.2 在素材人群报表筛选里面近一个月累计100个激活
-        sql = """
-        select signature from ctop_kuaishou_audience_report_daily_age_material  
-        where signature in %s
-         and datediff(now(),stat_date) <= %s
-           group by signature
-           having sum(activation) >= %s
-        """ % (tuple(active_signatures),
-               config['getBaysCombineMaterialFilterRule']['days'],
-               config['getBaysCombineMaterialFilterRule']['activation'])
-        df = pd.read_sql(sql, product_engine)
+        # 4.1 获取指定项目下当前活跃的素材信息
+        # 如近60天内累计激活个数达到100个(开发环境或者生产环境,这部分都读取生产数据库)
+        start_date = datetime.datetime.now().date() + datetime.timedelta(days=-config['activeMaterialFilterRule']['days'])
+        sql = f"select signature from kuaishou_material_video_report_daily_dw where project_id = {project_id} " \
+              f"and stat_date >= {start_date.year * 10000 + start_date.month * 100 + start_date.day} group by signature " \
+              f"having sum(activation) >= {config['activeMaterialFilterRule']['activation']}"
+        df = pd.read_sql(sql, application_product_db)
+        active_signatures = list(df[~df.signature.isnull()].signature.values)
+        active_signatures = active_signatures * 2 if len(active_signatures) == 1 else active_signatures
+
+        # 4.2 在素材人群报表筛选里面累计100个激活
+        sql = f"select signature from ctop_kuaishou_audience_report_daily_age_material where signature in {tuple(active_signatures)} " \
+              f"group by signature having sum(activation) >= {config['getBaysCombineMaterialFilterRule']['activation']}"
+        df = pd.read_sql(sql, jeecg_product_db)
         signature_lst = df['signature'].values
 
         # 这行代码用于测试
-        # signature_lst = ['4a56bc00ce51420f3a460ab51c6f5110']
+        # signature_lst = ['f7adee9229e58176d132cf0bd7590a95']
 
-        # 4.2 计算指定素材的贝叶斯特征
+        # 4.2 计算指定素材定向
         for sig in signature_lst:
             try:
-                for t_type in config['targetType']:
-                    bayes_feature_lst = []
-                    for dimension in target_dim:
-                        cls = BayesFeatures(sig, t_type, dimension, config['bayesDim'][dimension])
-                        # 计算滑窗组合
-                        cls.get_window_combine()
-                        cls.get_bayes_feature()
-                        bayes_feature_lst.append(cls.bayes_feature)
-
-                    # 依据特征值,计算多维度的组合预估值
-                    cls = BayesCombine(sig, project_id, t_type, bayes_feature_lst)
-                    cls.get_bayes_estimate()
-                    cls.write_to_db()
+                feature_lst = []
+                for dimension in target_dim:
+                    get_single_fea_ins = GetSingleDimFeatures(sig, dimension, config['bayesDim'][dimension])
+                    # 计算滑窗组合
+                    get_single_fea_ins.get_window_combine()
+                    get_single_fea_ins.get_features()
+                    feature_lst.append(get_single_fea_ins.features)
+
+                # 依据特征值,计算多维度的组合预估值
+                get_composite_fea_ins = GetCompositeDimFeatures(sig, feature_lst, project_id)
+                get_composite_fea_ins.get_composite_features()
                 logger.info('project_id=%s, signature= %s 完成贝叶斯组合计算!' % (project_id, sig))
             except:
                 logger.error('project_id=%s, signature=%s 贝叶斯组合计算出错,异常信息为%s!' % (project_id, sig, traceback.format_exc()))
 
-        logger.info('project_id is %s 完成贝叶斯组合计算!' % project_id)
+    logger.info('project_id is %s 完成贝叶斯组合计算!' % project_id)
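
Editor's note: a minimal, self-contained sketch of the sliding-window logic that GetSingleDimFeatures.get_window_combine / get_features implement above, using a small in-memory DataFrame in place of the ctop_kuaishou_audience_report_daily_age_material table; the counts and thresholds are illustrative only.

    import pandas as pd

    dim_value_lst = ['18-23岁', '24-30岁', '31-40岁', '41-49岁', '50+岁']
    window_size = 2
    ratio_threshold = 0.8

    # sliding windows over the ordered value list, plus the catch-all '不限' (unlimited)
    window_combine = [dim_value_lst[i:i + window_size]
                      for i in range(len(dim_value_lst) - window_size + 1)]
    window_combine.append(['不限'])

    # hypothetical per-segment activation counts standing in for the audience table
    audience_df = pd.DataFrame({'age_segment': dim_value_lst,
                                'activation': [120, 300, 260, 80, 40]})

    features = {}
    for combine in window_combine:
        if combine == ['不限']:
            features['不限'] = 1  # the age dimension keeps the unlimited option unconditionally
        else:
            ratio = audience_df[audience_df['age_segment'].isin(combine)].activation.sum() \
                    / audience_df.activation.sum()
            features['|'.join(combine)] = round(ratio, 4)

    # keep only combinations whose activation share exceeds ratio_threshold
    features = {f"age_{k}": v for k, v in features.items() if v > ratio_threshold}
    print(features)  # -> {'age_不限': 1}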

+ 11 - 14
ai_callback_handler.py

@@ -1,28 +1,25 @@
 import json
+import os
 import traceback
-import tornado.web
+
 import pymysql
-import logging
-from concurrent_log import ConcurrentTimedRotatingFileHandler
+import tornado.web
 import yaml
-import os
+from loguru import logger
 
-log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m/%d/%Y %I:%M:%S %p')
-log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/ai_target/logs/ai_callback.log",
-                                       when="midnight", backupCount=100)
-log_handler.setFormatter(log_formatter)
-logger = logging.getLogger('ai_callback_logger')
-logger.addHandler(log_handler)
-logger.setLevel(logging.DEBUG)
-print('id of ai_callback_logger %s' % id(logger))
+logger.remove()  # 删去 import logger之后自动产生的handler,不删除的话会出现重复输出的现象
+logger.add("/data/pythonProject/ai_target/logs/ai_callback.{time:YYYY-MM-DD}.log",
+           rotation="00:00",
+           format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
+           level="INFO")
 logger.info("ai_callback_server started!")
 
 with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:
     config = yaml.load(f.read(), Loader=yaml.FullLoader)
 if os.getenv('LYY_DEV', 'unknown') == 'dev':
-    db_info = config['devDB']
+    db_info = config['jeecg_dev_db']
 else:
-    db_info = config['productDB']
+    db_info = config['jeecg_product_db']
 
 
 class AiCallBackAddCreative(tornado.web.RequestHandler):
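
Editor's note: this handler swaps the ConcurrentTimedRotatingFileHandler setup for loguru. A short sketch of that pattern with an illustrative log path: loguru installs a default stderr sink on import, so logger.remove() is called first, otherwise every message is emitted twice.

    from loguru import logger

    logger.remove()  # drop loguru's default stderr sink so messages are not duplicated
    logger.add("logs/demo.{time:YYYY-MM-DD}.log",
               rotation="00:00",  # start a new file at midnight, like the handler it replaces
               level="INFO",
               format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}")
    logger.info("handler configured")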

+ 157 - 239
ai_target_combine_handler.py

@@ -1,97 +1,60 @@
-import uuid
 import datetime
-import tornado
 import json
-import traceback
-import pandas as pd
+import random
+import uuid
+
 import numpy as np
+import pandas as pd
 import pymysql
-import os
-import yaml
-import random
 import requests
-from utils.commonFunc import get_db_engine, is_contains_gender, is_contains_age, is_contains_region
+import yaml
+from loguru import logger
+
+from config.url_and_db import create_campaign_url, create_group_and_creative_url, headers, \
+    update_campaign_status_url, jeecg_db, jeecg_product_db
 from utils.BaseClass import NpEncoder
-from config.url import create_campaign_url, create_group_and_creative_url, headers, update_campaign_status_url
-import logging
-from concurrent_log import ConcurrentTimedRotatingFileHandler
-
-log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y/%m/%d %I:%M:%S %p')
-log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/ai_target/logs/ai_target_combine.log", when="midnight", backupCount=100)
-log_handler.setFormatter(log_formatter)
-logger = logging.getLogger('ai_strategy_request_logger')
-logger.addHandler(log_handler)
-logger.setLevel(logging.DEBUG)
-logger.info("ai_target_combine_logger started!")
-print('id of ai_target_combine_logger %s' % id(logger))
+from utils.commonFunc import is_contains_gender, is_contains_age, is_contains_region
 
 with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:
     config = yaml.load(f.read(), Loader=yaml.FullLoader)
 
 
-class AiTargetCombine(tornado.web.RequestHandler):
-    def post(self):
-        # 解析接收到的参数
-        data = self.request.body
-        data = str(data, 'utf8')
-        data = json.loads(data, encoding='utf8')
-        account_id = data['account_id']
-        project_id = data['project_id']
-        logger.info("账户信息=%s, 当前进程=%s, 当前进程的主进程=%s" % (account_id, os.getpid(), os.getppid()))
-        try:
-            # 1、创建类实例
-            ins = GetTargetAndAssemblyParameters(account_id, project_id)
-
-            # 2、判断target_combine_to_create是否为空,为空则不执行后续的操作
-            if not ins.target_combine_to_create:
-                logger.info("account_id = %s, message = '没有定向组合: 定向冲突或者素材关联的创意个数达到上限' " % ins.account_id)
-                self.write(json.dumps({"account_id": account_id, "message": "没有定向组合: 定向冲突或者素材关联的创意个数达到上限", "code": 1}))
-                self.flush()
-            else:
-                # 2、判断当天是否已经存在包含【优质定向】的广告计划,如果有则获取该计划id,否则需要新建计划
-                sql = """select campaign_id, campaign_name from ctop_kuaishou_campaign 
-                        where account_id = %s
-                        and put_create_time >= curdate()
-                        and campaign_name like '%s'""" % (ins.account_id, '%%优质定向%%')
-                df = pd.read_sql(sql, ins.engine)
-                if df.empty:
-                    ins.add_campaign()
-                else:
-                    ins.campaign_id = df['campaign_id'].values[0]
-
-                # 3、拼装广告组和创意的参数
-                ins.assembly_group_and_creative_params()
-
-                # 4、信息写如到智能策略表中
-                ins.write_intelligence_strategy_table()
-
-                # 5、
-
-                # 5、发送创建组和创意的请求
-                request = requests.post(create_group_and_creative_url,
-                                        headers=headers,
-                                        data=json.dumps(ins.request_data, cls=NpEncoder))
-                response_data = json.loads(request.text)
-
-                # 6、接口的返回信息更新到数据库表
-                ins.update_intelligence_strategy_table(message=response_data)
-
-                logger.info("account_id = %s, 策略uuid = %s 的请求返回信息:%s" % (ins.account_id, ins.ai_strategy_uuid, response_data))
-                self.write(json.dumps({"account_id": account_id, "message": str(response_data), "code": 1}))
-                self.flush()
-        except:
-            logger.error("account_id = %s, traceback is %s" % (account_id, traceback.format_exc()))
-            self.write(json.dumps({"account_id": account_id, "message": str(traceback.format_exc()), "code": -1}))
-            self.flush()
-
-
-class GetTargetAndAssemblyParameters(object):
+@logger.catch()
+def ai_target_combine_create(account_id: int, project_id: int):
+    logger.info(f"account_id = {account_id}, project_id = {project_id} 开始创建优质定向...")
+    ins = GetTargetAndCreateAds(account_id, project_id)
+
+    # 2、判断 target_combine_to_create 是否为空,为空则不执行后续的操作
+    if not ins.target_combine_to_create:
+        logger.info(f"account_id = {account_id},  project_id = {project_id}, "
+                    f"message = 没有定向组合: 定向冲突或者素材关联的创意个数达到上限, 创建优质定向已结束 ")
+    else:
+        # 创建或获取计划信息
+        ins.add_campaign()
+
+        # 3、拼装广告组和创意的参数
+        ins.assembly_group_and_creative_params()
+
+        # 4、信息写如到智能策略表中
+        ins.write_intelligence_strategy_table()
+
+        # 5、发送创建组和创意的请求
+        request = requests.post(create_group_and_creative_url,
+                                headers=headers,
+                                data=json.dumps(ins.request_data, cls=NpEncoder))
+        response_data = json.loads(request.text)
+
+        # 6、接口的返回信息更新到数据库表
+        ins.update_intelligence_strategy_table(message=response_data)
+
+        logger.info(f"account_id = {account_id}, 策略uuid = {ins.ai_strategy_uuid} 的请求返回信息:{response_data}")
+    return
+
+
+class GetTargetAndCreateAds(object):
     def __init__(self, account_id, project_id):
         self.account_id = account_id
         self.project_id = project_id
-        self.engine = None
-        self.product_engine = None
-        self.db_config = None
         self.advertiser_strategy_id = None
         self.advertiser_strategy = {}
         self.ai_strategy_uuid = str(uuid.uuid4())
@@ -101,42 +64,23 @@ class GetTargetAndAssemblyParameters(object):
         self.target_combine_from_table = []
         self.target_combine_filter_by_subset = []
         self.target_combine_to_create = []
-        self.get_database_engine()  # 获取数据库信息
         self.get_advertiser_strategy_info()  # 获取账户配置信息
         self.get_signature_and_target()  # 从素材定向组合表中获取最新的定向组合
         self.filter_target_combine_by_whether_is_subset()  # 筛选出为账户配置子集的定向组合
         self.filter_target_combine_by_creative_cnt()  # 依据素材在指定账户下关联的创意个数,进行筛选创建个数,防止超限导致的创建失败
 
-    def get_database_engine(self):
-        # 数据库连接引擎,依据开发环境/生产环境 进行切换
-        if os.getenv('LYY_DEV', 'unknown') == 'dev':
-            self.engine = get_db_engine(config['devDB'])
-            self.db_config = config['devDB']
-        else:
-            self.engine = get_db_engine(config['productDB'])
-            self.db_config = config['productDB']
-
-        # 有些表需要连接生产数据库,不管当前是开发环境还是测试环境
-        self.product_engine = get_db_engine(config['productDB'])
-
     def get_advertiser_strategy_info(self):
         """
-        获取指定账户下客户的策略信息(不管是开发环境还是生产环境,该表都读取生产数据库)
-        为后续的写入库表,校验一致性做准备
+        获取指定账户下客户的策略信息(不管是开发环境还是生产环境,该表都读取生产数据库) 为后续的写入库表,校验一致性做准备
         """
-        sql = """
-                select * 
-                from ctop_ai_kuaishou_advertiser_strategy 
-                where account_id = %d
-                limit 1
-                """ % self.account_id
-        advertiser_strategy_df = pd.read_sql(sql, self.product_engine)
+        sql = f"select * from ctop_ai_kuaishou_advertiser_strategy where account_id = {self.account_id} and status = 1 limit 1"
+        advertiser_strategy_df = pd.read_sql(sql, jeecg_product_db)
         self.advertiser_strategy_id = int(advertiser_strategy_df['id'].values[0])
         self.advertiser_strategy = advertiser_strategy_df.T.to_dict()[0]
 
     def add_campaign(self):
         """
-        新增广告计划,如果存在则跳过
+        广告计划,如果存在则获取campaign_id, 否则新建广告计划。
         response_data = {
             "code": 0,
             "data": {
@@ -146,106 +90,90 @@ class GetTargetAndAssemblyParameters(object):
             },
             "message": "SUCCESS"}
         """
-        # 1、请求创建广告计划的接口
-        campaign_name = self.advertiser_strategy['campaign_name']
-        campaign_name = campaign_name.replace('{{自定义}}', '')
-        campaign_name = campaign_name.replace('{{日期}}', '')
-        campaign_name += '_优质定向_'
-        campaign_name += str(datetime.datetime.now().date())
-        create_campaign_request_data = {'account_id': self.account_id,
-                                        'campaign_name': campaign_name,
-                                        'type': self.advertiser_strategy['campaign_type'],
-                                        'day_budget': self.advertiser_strategy['campaign_day_budget'],
-                                        'day_budget_schedule': eval(self.advertiser_strategy['campaign_day_budget_schedule'])
-                                        if self.advertiser_strategy['campaign_day_budget_schedule'] else None}
-        request = requests.post(url=create_campaign_url,
-                                headers=headers,
-                                data=json.dumps(create_campaign_request_data, cls=NpEncoder))
-        response_data = json.loads(request.text)
-
-        # 2、对接口返回的结果处理
-        campaign_info_to_db = {
-            'campaign_uuid': str(uuid.uuid4()),
-            'account_id': self.account_id,
-            'ai_strategy_uuid': self.ai_strategy_uuid,
-            'campaign_name': campaign_name,
-            'campaign_type': self.advertiser_strategy['campaign_type'],
-            'day_budget': self.advertiser_strategy['campaign_day_budget'],
-            'day_budget_schedule': self.advertiser_strategy['campaign_day_budget_schedule'],
-            'create_time': datetime.datetime.now()
-        }
-        res = 0
-        if response_data['code'] == 0:
-            self.campaign_id = response_data['data'].get('campaign_id', None)
-            campaign_info_to_db['campaign_id'] = self.campaign_id
-            campaign_info_to_db['campaign_create_time'] = response_data['data'].get('campaign_create_time', None)
-            campaign_info_to_db['status'] = response_data.get('code', None)
-            campaign_info_to_db['message'] = response_data.get('message', None)
-
-            # self.advertiser_strategy['campaign_status'] 如果为暂停,需要创建成功后,调用修改广告计划状态的接口
-            # `campaign_status` int(11) DEFAULT '1' COMMENT '计划状态: 1:投放 2:暂停 '
-            if self.advertiser_strategy['campaign_status'] == 2:
-                update_campaign_status_request = requests.post(url=update_campaign_status_url,
-                                                               headers=headers,
-                                                               data=json.dumps({'accountId': self.account_id,
-                                                                                'putStatus': 2,
-                                                                                'userId': '113dee46c7df464da78c07a985e92cd1',
-                                                                                'campaignIds': [self.campaign_id]},
-                                                                               cls=NpEncoder))
-                update_campaign_status_response_data = json.loads(update_campaign_status_request.text)
-                if update_campaign_status_response_data.get('result') and \
-                        update_campaign_status_response_data.get('result').get('failCount') == 0:
-                    logger.info("账户 %s 下的广告计划 %s 暂停成功!" % (self.account_id, self.campaign_id))
-                else:
-                    logger.error("账户 %s 下的广告计划 %s 暂停失败,异常信息: %s!" %
-                                 (self.account_id,
-                                  self.campaign_id,
-                                  str(update_campaign_status_response_data.get('result'))))
-                    raise Exception("账户 %s 下的广告计划 %s 暂停失败!" % (self.account_id, self.campaign_id))
+        # 判断当天是否已经存在包含【优质定向】的广告计划,如果有则获取该计划id,否则需要新建计划
+        sql = f"select campaign_id from ctop_kuaishou_campaign where account_id = {self.account_id} " \
+              f"and  put_create_time >= curdate() and campaign_name like '%%优质定向V0.2%%'"
+        campaign_df = pd.read_sql(sql, jeecg_db)
+        if not campaign_df.empty:
+            self.campaign_id = campaign_df['campaign_id'].values[0]
         else:
-            campaign_info_to_db['message'] = response_data['message']
-            campaign_info_to_db['status'] = response_data['code']
-            res = -1
-            raise Exception("账户 %s,新建广告计划失败,详细:%s!" % (self.account_id, str(response_data)))
-
-        # 3、写入计划层级的操作表
-        df = pd.DataFrame.from_dict(campaign_info_to_db, orient='index').T
-        df.to_sql(name="ctop_ai_kuaishou_campaign_level_operation_record",
-                  con=self.engine,
-                  if_exists='append',
-                  index=False)
-        return res
+            # 请求创建广告计划的接口
+            campaign_name = self.advertiser_strategy['campaign_name']
+            campaign_name = campaign_name.replace('{{自定义}}', '')
+            campaign_name = campaign_name.replace('{{日期}}', '')
+            campaign_name += '_优质定向V0.2_'
+            campaign_name += str(datetime.datetime.now().date())
+            create_campaign_request_data = {'account_id': self.account_id,
+                                            'campaign_name': campaign_name,
+                                            'type': self.advertiser_strategy['campaign_type'],
+                                            'day_budget': self.advertiser_strategy['campaign_day_budget'],
+                                            'day_budget_schedule': eval(self.advertiser_strategy['campaign_day_budget_schedule'])
+                                            if self.advertiser_strategy['campaign_day_budget_schedule'] else None}
+            request = requests.post(url=create_campaign_url,
+                                    headers=headers,
+                                    data=json.dumps(create_campaign_request_data, cls=NpEncoder))
+            response_data = json.loads(request.text)
+
+            # 2、对接口返回的结果处理
+            campaign_info_to_db = {
+                'campaign_uuid': str(uuid.uuid4()),
+                'account_id': self.account_id,
+                'ai_strategy_uuid': self.ai_strategy_uuid,
+                'campaign_name': campaign_name,
+                'campaign_type': self.advertiser_strategy['campaign_type'],
+                'day_budget': self.advertiser_strategy['campaign_day_budget'],
+                'day_budget_schedule': self.advertiser_strategy['campaign_day_budget_schedule'],
+                'create_time': datetime.datetime.now()
+            }
+            if response_data['code'] == 0:
+                self.campaign_id = response_data['data'].get('campaign_id', None)
+                campaign_info_to_db['campaign_id'] = self.campaign_id
+                campaign_info_to_db['campaign_create_time'] = response_data['data'].get('campaign_create_time', None)
+                campaign_info_to_db['status'] = response_data.get('code', None)
+                campaign_info_to_db['message'] = response_data.get('message', None)
+
+                # self.advertiser_strategy['campaign_status'] 如果为暂停,需要创建成功后,调用修改广告计划状态的接口
+                # `campaign_status` int(11) DEFAULT '1' COMMENT '计划状态: 1:投放 2:暂停 '
+                if self.advertiser_strategy['campaign_status'] == 2:
+                    update_campaign_status_request = requests.post(url=update_campaign_status_url,
+                                                                   headers=headers,
+                                                                   data=json.dumps({'accountId': self.account_id,
+                                                                                    'putStatus': 2,
+                                                                                    'userId': '113dee46c7df464da78c07a985e92cd1',
+                                                                                    'campaignIds': [self.campaign_id]},
+                                                                                   cls=NpEncoder))
+                    update_campaign_status_response_data = json.loads(update_campaign_status_request.text)
+                    if update_campaign_status_response_data.get('result') and \
+                            update_campaign_status_response_data.get('result').get('failCount') == 0:
+                        logger.info(f"账户:{self.account_id} 下的广告计划 {self.campaign_id} 暂停成功!")
+                    else:
+                        logger.error(f"账户:{self.account_id} 下的广告计划 {self.campaign_id} 暂停失败,"
+                                     f"异常信息: {str(update_campaign_status_response_data.get('result'))}")
+                        raise Exception(f"账户:{self.account_id} 下的广告计划 {self.campaign_id} 暂停失败!")
+            else:
+                campaign_info_to_db['message'] = response_data['message']
+                campaign_info_to_db['status'] = response_data['code']
+                raise Exception(f"账户:{self.account_id}, 新建广告计划失败,详细:{str(response_data)}!")
+
+            # 3、写入计划层级的操作表
+            df = pd.DataFrame.from_dict(campaign_info_to_db, orient='index').T
+            df.to_sql(name="ctop_ai_kuaishou_campaign_level_operation_record",
+                      con=jeecg_db,
+                      if_exists='append',
+                      index=False)
 
     def get_signature_and_target(self):
         """
         从 ctop_ai_kuaishou_signature_recommended_target_combine 表中读取素材和对应的定向
         读取配置文件的条件,筛选出符合条件的定向组合
         """
-        sql = """
-        select * from ctop_ai_kuaishou_signature_recommended_target_combine where project_id = %s 
-        and stat_date = (select max(stat_date) from ctop_ai_kuaishou_signature_recommended_target_combine)
-        """ % self.project_id
-        df = pd.read_sql(sql, self.engine)
-
-        # 计算组合的概率,相对于实际投放概率高出了百分之多少,样本量是否达标(读取配置文件),且p值小于等于0.05
-        df['improve_ratio'] = (df['combine_estimate_prob'] - df['actual_prob']) / df['actual_prob']
-
-        # 两种类型的过滤标准不一样,分开进行判断,然后对结果进行合并
-        df_1 = df[(df['target_type'] == 'action_ratio') &
-                  (df['improve_ratio'] >= config['filterTargetCombine']['actionRatio']['improveRatio']) &
-                  (df['sample_size'] >= config['filterTargetCombine']['actionRatio']['sampleSize']) &
-                  (df['p_value'] <= 0.05)]
-
-        df_2 = df[(df['target_type'] == 'convert_ratio') &
-                  (df['improve_ratio'] >= config['filterTargetCombine']['convertRatio']['improveRatio']) &
-                  (df['sample_size'] >= config['filterTargetCombine']['convertRatio']['sampleSize']) &
-                  (df['p_value'] <= 0.05)]
-
-        merge_df = pd.concat([df_1, df_2], axis=0)
+        sql = f"select * from ctop_ai_kuaishou_signature_recommended_target_combine_v2 where project_id = {self.project_id} and " \
+              f"stat_date = (select max(stat_date) from ctop_ai_kuaishou_signature_recommended_target_combine_v2)"
+        target_df = pd.read_sql(sql, jeecg_db)
 
-        # 'age', 'gender', 'city', 'business', 'province', 'client'
+        # 'age', 'gender', 'city'
         features = [key for key, value in config['bayesDim'].items() if value['isOn']]
-        self.target_combine_from_table = merge_df[['id', 'signature'] + features].to_dict(orient='records')
+        self.target_combine_from_table = target_df[['id', 'signature'] + features].to_dict(orient='records')
 
     def write_intelligence_strategy_table(self):
         """
@@ -257,12 +185,12 @@ class GetTargetAndAssemblyParameters(object):
                 'advertiser_strategy_id': self.advertiser_strategy_id,
                 'account_id': self.account_id,
                 'ai_strategy_request_content': str(self.request_data),
-                'ai_strategy_remark': '优质定向组合',
+                'ai_strategy_remark': '优质定向组合V0.2',
                 'create_time': datetime.datetime.now()
             }
         df = pd.DataFrame.from_dict(intelligence_strategy_dict, orient='index').T
         df.to_sql(name="ctop_ai_kuaishou_intelligence_strategy",
-                  con=self.engine,
+                  con=jeecg_db,
                   if_exists='append',
                   index=False)
 
@@ -277,7 +205,6 @@ class GetTargetAndAssemblyParameters(object):
         gender_bool = True
         age_bool = True
         region_bool = True
-
         for item in self.target_combine_from_table:
             if 'gender' in item.keys():
                 gender_bool, gender_dict = is_contains_gender(item['gender'], self.advertiser_strategy['gender'])
@@ -296,21 +223,24 @@ class GetTargetAndAssemblyParameters(object):
                 target_combine.update(region_dict) if 'city' in item.keys() else None
                 self.target_combine_filter_by_subset.append(target_combine)
             else:
-                logger.info("推荐定向:%s 与 账户 %s 配置信息里的定向(gender:%s, age_min: %s,age_max: %s, ages_ranges:%s,region:%s )存在冲突" %
-                            (item,
-                             self.account_id,
-                             self.advertiser_strategy['gender'],
-                             self.advertiser_strategy['age_min'],
-                             self.advertiser_strategy['age_max'],
-                             self.advertiser_strategy['ages_range'],
-                             self.advertiser_strategy['region']))
+                if not gender_bool:
+                    logger.info(f"优质定向中的gender:{item.get('gender')}, "
+                                f"与账户:{self.account_id} 配置信息里的gender: {self.advertiser_strategy['gender']} 存在冲突")
+                if not age_bool:
+                    logger.info(f"优质定向中的age:{item.get('age')}, "
+                                f"与账户:{self.account_id} 配置信息里的age: "
+                                f"{self.advertiser_strategy['age_min'], self.advertiser_strategy['age_max'], self.advertiser_strategy['ages_range']}"
+                                f"存在冲突")
+                if not region_bool:
+                    logger.info(f"优质定向中的region:{item.get('city')}, "
+                                f"与账户:{self.account_id} 配置信息里的region: {self.advertiser_strategy['region']} 存在冲突")
 
     def filter_target_combine_by_creative_cnt(self):
         """
         依据素材已经关联的创意个数,来定素材可以创建的定向组合个数, 并从中随机随着N个
         """
         if not self.target_combine_filter_by_subset:
-            return None
+            return
 
         # 1-1 获取素材在该账户下关联的创意个数,过滤掉素材关联创意个数超过200的情况
         df = pd.DataFrame(self.target_combine_filter_by_subset)
@@ -318,23 +248,13 @@ class GetTargetAndAssemblyParameters(object):
         # 会导致后续拼装的参数发送给java服务报错(jackson.core.JsonParseException: Non-standard token \'NaN\':)
         # 需要把 np.nan 替换为 None
         df.replace({np.nan: None}, inplace=True)
-        # signature in ('03b93728f0d82ea7865c3c7cf632bc1b',),避免这样的sql报错
-        if df.signature.nunique() == 1:
-            sig_lst = tuple(list(df.signature.unique()) * 2)
-        else:
-            sig_lst = tuple(list(df.signature.unique()))
-
-        sql = """
-                select t1.signature, t2.creative_count
-                from
-                (select account_id, signature, photo_id from ctop_kuaishou_video_get 
-                 where account_id = %s and signature in %s 
-                 group by account_id, signature) t1
-                left join
-                ctop_kuaishou_video_relate_creatives t2
-                on t1.account_id = t2.account_id and t1.photo_id = t2.photo_id 
-                """ % (self.account_id, sig_lst)
-        creative_cnt_df = pd.read_sql(sql, self.product_engine)
+        # signature in ('03b93728f0d82ea7865c3c7cf632bc1b',), 避免这样的sql报错
+        sig_lst = tuple(list(df.signature.unique()) * 2) if df.signature.nunique() == 1 else tuple(list(df.signature.unique()))
+        sql = f"select t1.signature, t2.creative_count " \
+              f"from (select account_id, signature, photo_id from ctop_kuaishou_video_get where account_id = {self.account_id} " \
+              f"and signature in {sig_lst} ) t1 left join ctop_kuaishou_video_relate_creatives t2 " \
+              f"on t1.account_id = t2.account_id and t1.photo_id = t2.photo_id "
+        creative_cnt_df = pd.read_sql(sql, jeecg_product_db)
         creative_cnt_df = creative_cnt_df[creative_cnt_df.creative_count < 200]
 
         # 1-2 计算每个素材还能创建的广告组(定向)个数: (200 - 已关联创意个数) / 15
@@ -387,15 +307,14 @@ class GetTargetAndAssemblyParameters(object):
             group_name = group_params['unit_name']
 
             # 获取素材名称, 在广告组命名中进行替换 (同时获取photo_id, 用于拼装创意层级的参数)
-            sql = """select photo_id, photo_name, material_type 
-            from ctop_kuaishou_video_get where account_id = %s and signature = '%s' limit 1""" % \
-                  (self.account_id, item['signature'])
-            signature_df = pd.read_sql(sql, self.product_engine)
+            sql = f"select photo_id, photo_name, material_type from ctop_kuaishou_video_get where account_id = {self.account_id} and " \
+                  f"signature = '{item['signature']}' limit 1"
+            signature_df = pd.read_sql(sql, jeecg_product_db)
             photo_name = signature_df['photo_name'].values[0] if not signature_df.empty else 'unknown'
             photo_id = signature_df['photo_id'].values[0] if not signature_df.empty else None
             material_type = signature_df['material_type'].values[0] if not signature_df.empty else None
             group_name.replace('{{素材名称}}', photo_name)
-            group_name = group_name + '_优质定向' + '_' + str(group_cnt)
+            group_name = group_name + '_优质定向V0.2' + '_' + str(group_cnt)
 
             # 写入数据表中组信息
             single_group_params_to_db = group_params_to_db.copy()
@@ -441,14 +360,14 @@ class GetTargetAndAssemblyParameters(object):
         # 创意层级信息批量写入数据库
         creative_df = pd.DataFrame(batch_creative_params_to_db)
         creative_df.to_sql(name="ctop_ai_kuaishou_creative_level_operation_record",
-                           con=self.engine,
+                           con=jeecg_product_db,
                            if_exists='append',
                            index=False)
 
         # 组层级信息批量写入数据库
         group_df = pd.DataFrame(batch_group_params_to_db)
         group_df.to_sql(name="ctop_ai_kuaishou_unit_level_operation_record",
-                        con=self.engine,
+                        con=jeecg_product_db,
                         if_exists='append',
                         index=False)
 
@@ -538,8 +457,6 @@ class GetTargetAndAssemblyParameters(object):
                         'behavior_interest': self.advertiser_strategy.get('behavior_interest')
                         }
 
-        group_params['unit_name'] = group_params['unit_name'].replace('{{自定义}}', '')
-
         # 2、smart_cover 和 asset_mining 需要将0/1 转化为 False/True
         group_params['smart_cover'] = True if group_params.get('smart_cover', 0) == 1 else False
         group_params['asset_mining'] = True if group_params.get('asset_mining', 0) == 1 else False
@@ -549,7 +466,7 @@ class GetTargetAndAssemblyParameters(object):
         # 3-1 获取应用标记和应用名称
         sql = """select app_version, app_name from ctop_kuaishou_app_list where app_id = %s limit 1""" % \
               group_params.get('app_id', -1)
-        app_info_df = pd.read_sql(sql, self.engine)
+        app_info_df = pd.read_sql(sql, jeecg_product_db)
         app_version = app_info_df['app_version'].values[0] if not app_info_df.empty else 'unknown'
         app_name = app_info_df['app_name'].values[0] if not app_info_df.empty else 'unknown'
         # 3-2 素材名称, 在拼装广告组和创意层级函数中获取
@@ -578,6 +495,7 @@ class GetTargetAndAssemblyParameters(object):
         else:
             unit_name = unit_name + '_' + now_date
         group_params['unit_name'] = unit_name
+        group_params['unit_name'] = group_params['unit_name'].replace('{{自定义}}', '')
 
         # 4、处理 cpa_bid
         # 因为存在 【单出价请输入单数字,阶梯出价请用/隔开】 如:1900/2000/2100 cpa_bid varchar(255) DEFAULT NULL COMMENT '出价'
@@ -638,7 +556,7 @@ class GetTargetAndAssemblyParameters(object):
             random.shuffle(description_lst)
             creative_params['description'] = description_lst[0]
 
-        # 2 写入数据库的创意参数
+        # 2 写入数据库的创意参数(数据库表结构没有下面的字段,因此入库时去掉这些字段)
         creative_params_to_db = creative_params.copy()
         drop_cols = ['put_status', 'live_creative_type']
         for col in drop_cols:
@@ -665,11 +583,11 @@ class GetTargetAndAssemblyParameters(object):
         更新 intelligence_strategy_table
         :return:
         """
-        db_con = pymysql.connect(host=self.db_config['host'],
-                                 port=self.db_config['port'],
-                                 user=self.db_config['username'],
-                                 password=self.db_config['password'],
-                                 database=self.db_config['database'],
+        db_con = pymysql.connect(host=jeecg_product_db['host'],
+                                 port=jeecg_product_db['port'],
+                                 user=jeecg_product_db['username'],
+                                 password=jeecg_product_db['password'],
+                                 database=jeecg_product_db['database'],
                                  charset='utf8')
         cursor = db_con.cursor()
         if message:

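One thing worth double-checking in the hunk above: config/url.py exports jeecg_product_db as a SQLAlchemy engine, but pymysql.connect(host=jeecg_product_db['host'], ...) needs the raw host/port/user mapping from config.yaml; if the handler imports the engine of the same name, that subscripting will fail. The sketch below assumes a plain dict is passed in, and the table and columns in the UPDATE are illustrative only.

import pymysql

db_conf = {"host": "127.0.0.1", "port": 3306, "username": "user", "password": "***", "database": "demo"}

db_con = pymysql.connect(host=db_conf["host"],
                         port=db_conf["port"],
                         user=db_conf["username"],
                         password=db_conf["password"],
                         database=db_conf["database"],
                         charset="utf8")
try:
    with db_con.cursor() as cursor:
        # Illustrative statement; the real code updates the intelligence strategy record for this run.
        cursor.execute("update demo_strategy set status = %s, message = %s where id = %s", (2, "done", 1))
    db_con.commit()
finally:
    db_con.close()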
+ 0 - 2
ai_target_server.py

@@ -3,13 +3,11 @@ import tornado.log
 import tornado.options
 import tornado.web
 import platform
-from ai_target_combine_handler import AiTargetCombine
 from ai_callback_handler import AiCallBackAddCreative, AiCallBackAddGroup
 
 
 def create_application():
     web_application = tornado.web.Application([
-        (r"/ai_target_combine", AiTargetCombine),
         (r"/ai_callback_add_creative", AiCallBackAddCreative),
         (r"/ai_callback_add_group", AiCallBackAddGroup),
     ])

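With the AiTargetCombine route gone, the server keeps only the two callback endpoints. A minimal sketch of how such a Tornado app is typically wired and started is below; the handler bodies are stubs rather than the real ai_callback_handler code, and port 31012 is only inferred from the ai_target_url entries in config.yaml.

import tornado.ioloop
import tornado.web


class AiCallBackAddCreative(tornado.web.RequestHandler):
    def post(self):
        # Stub: the real handler persists the creative-creation callback payload.
        self.write({"code": 0, "message": "ok"})


class AiCallBackAddGroup(tornado.web.RequestHandler):
    def post(self):
        # Stub: the real handler persists the unit-creation callback payload.
        self.write({"code": 0, "message": "ok"})


def create_application():
    return tornado.web.Application([
        (r"/ai_callback_add_creative", AiCallBackAddCreative),
        (r"/ai_callback_add_group", AiCallBackAddGroup),
    ])


if __name__ == "__main__":
    create_application().listen(31012)  # port assumed from the ai_target_url entries
    tornado.ioloop.IOLoop.current().start()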
+ 19 - 20
config/config.yaml

@@ -2,30 +2,29 @@
 projectId:
   - 458
 
-targetType:
-  - 'action_ratio'
-  - 'convert_ratio'
 
-productDB:
+jeecg_product_db:
   host: 139.186.27.96
   username: data
   password: hcst@2021
   port: 3390
   database: jeecg-boot
 
-devDB:
+
+application_product_db:
+  host: 139.186.27.96
+  username: data
+  password: hcst@2021
+  port: 3390
+  database: application
+
+jeecg_dev_db:
   host: 139.186.165.84
   username: hcst
   password: hcst@2020
   port: 3306
   database: jeecg-boot
 
-localDB:
-  host: 192.168.1.193
-  username: root
-  password: root@123
-  port: 3306
-  database: mysql
 
 
 
@@ -41,6 +40,8 @@ bayesDim:
           - '50+岁'
         table: 'ctop_kuaishou_audience_report_daily_age_material'
         fieldName: 'age_segment'
+        ratio_threshold: 0.8
+        ratio_diff_threshold: 0.1
     gender:
         windowSize: 1
         isOn: True
@@ -49,6 +50,8 @@ bayesDim:
             - '女'
         table: 'ctop_kuaishou_audience_report_daily_gender_material'
         fieldName: 'gender'
+        ratio_threshold: 0.7
+        ratio_diff_threshold: 0.1
     city:
         windowSize: 4
         isOn: False
@@ -61,6 +64,7 @@ bayesDim:
             - '五线城市'
         table: 'ctop_kuaishou_audience_report_daily_city_material'
         fieldName: 'city'
+        ratio_threshold: 0.7
 
 
 # Active-material filter rule: at least 100 cumulative activations over the last 60 days
@@ -76,13 +80,8 @@ getBaysCombineMaterialFilterRule:
 
 
 filterTargetCombine:
-    actionRatio:
-        sampleSize: 10000
-        improveRatio: 0.2
-    convertRatio:
-        sampleSize: 10000
-        improveRatio: 0.2
-    combineCnt: 20
+  # TODO: change back to 20 before going live
+    combineCnt: 10
 
 
 mac_ip_config:
@@ -91,8 +90,8 @@ mac_ip_config:
       ai_target_url: "http://139.186.165.84:31012/"
       yourong_url: "http://192.168.0.195:8806/jeecg-boot/"
   # test environment
-  '52540003f5dd':
-      ai_target_url: "http://139.186.134.115:31012/"
+  '00163e9162d9':
+      ai_target_url: "http://192.168.0.195:31012/"
       yourong_url: "http://192.168.0.195:8806/jeecg-boot/"
   # production environment
   '525400c98142':

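To make the new layout concrete, here is a hedged sketch of how a consumer might load this file: pick the dev or product DB block by MAC address (mirroring the switch added to config/url.py below) and read the per-dimension ratio_threshold / ratio_diff_threshold values this commit introduces. How those thresholds are applied is not shown in this diff, so only the reading is sketched.

import uuid

import yaml

with open("/data/pythonProject/ai_target/config/config.yaml", mode="r", encoding="utf-8") as f:
    config = yaml.load(f.read(), Loader=yaml.FullLoader)

# Dev/test machines (matched by MAC address) use jeecg_dev_db; everything else uses jeecg_product_db.
mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
db_conf = config["jeecg_dev_db"] if mac in ["5254003fa716", "00163e9162d9"] else config["jeecg_product_db"]
print(db_conf["host"], db_conf["database"])

# Per-dimension thresholds added in this commit; not every dimension defines both keys.
for dim_name, dim_conf in config["bayesDim"].items():
    print(dim_name, dim_conf.get("ratio_threshold"), dim_conf.get("ratio_diff_threshold"))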
+ 12 - 2
config/url.py

@@ -1,13 +1,23 @@
 import uuid
 import yaml
-
+from utils.commonFunc import get_db_engine
 headers = {'Content-Type': 'application/json'}
 
 with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:
     config = yaml.load(f.read(), Loader=yaml.FullLoader)
     mac_ip_config = config['mac_ip_config']
 
+
+# Database connection engines; switch between dev / test / production environments
 mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
+if mac in ['5254003fa716', '00163e9162d9']:
+    jeecg_db = get_db_engine(config['jeecg_dev_db'])
+else:
+    jeecg_db = get_db_engine(config['jeecg_product_db'])
+
+jeecg_product_db = get_db_engine(config['jeecg_product_db'])
+application_product_db = get_db_engine(config['application_product_db'])
+
 
 # URL for creating campaigns
 create_campaign_url = mac_ip_config[mac]['yourong_url'] + 'kuaishou/create/campaignCreate'
@@ -39,7 +49,7 @@ update_campaign_status_url = mac_ip_config[mac]['yourong_url'] + 'kuaishou/batch
 create_group_and_creative_url = mac_ip_config[mac]['yourong_url'] + 'kuaishou/create/createUnitAndCreative'
 
 # endpoint of the ai_target_combine service
-target_combine_url = mac_ip_config[mac]['ai_target_url'] + 'ai_target_combine'
+# target_combine_url = mac_ip_config[mac]['ai_target_url'] + 'ai_target_combine'
 
 # audience-size estimation query for a targeting combination
 estimate_people_number_url = mac_ip_config[mac]['yourong_url'] + 'estimatePeopleNumber/getCount'

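For completeness, a hedged sketch of calling the audience-size endpoint defined above. Note that other files in this commit import config.url_and_db rather than config.url, so the final module name may differ; the payload keys below are purely illustrative placeholders, since the real request schema for estimatePeopleNumber/getCount is not part of this diff.

import json

import requests

from config.url import estimate_people_number_url, headers  # module path as shown in this hunk

# Illustrative payload only; the real field names are defined by the yourong service.
payload = {"accountId": 10967859, "ageMin": 24, "ageMax": 30}
response = requests.post(estimate_people_number_url, json.dumps(payload), headers=headers)
print(json.loads(response.text))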
+ 32 - 63
time_task_create_ad_by_target.py

@@ -1,95 +1,64 @@
-from concurrent.futures import ThreadPoolExecutor
-import requests
 import datetime
-import json
-import pandas as pd
 import uuid
-import traceback
+from concurrent.futures import ThreadPoolExecutor
+
+import pandas as pd
 import yaml
-from utils.commonFunc import get_db_engine
-from config.url import target_combine_url
-import logging
-from concurrent_log import ConcurrentTimedRotatingFileHandler
+from loguru import logger
 
+from config.url_and_db import jeecg_product_db
+from ai_target_create_ads import ai_target_combine_create
 
-log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m/%d/%Y %I:%M:%S %p')
-log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/ai_target/logs/time_task_create_ad_by_target.log", when="midnight", backupCount=100)
-log_handler.setFormatter(log_formatter)
-logger = logging.getLogger('time_task_create_ad_by_target_logger')
-logger.addHandler(log_handler)
-logger.setLevel(logging.DEBUG)
-print('id of time_task_create_ad_by_target_logger %s' % id(logger))
+logger.remove()  # remove the handler loguru installs on import; otherwise every record is emitted twice
+logger.add("/data/pythonProject/ai_target/logs/time_task_create_ad_by_target.{time:YYYY-MM-DD}.log",
+           rotation="00:00",
+           format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
+           level="INFO")
 
 with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:
     config = yaml.load(f.read(), Loader=yaml.FullLoader)
-product_db_engine = get_db_engine(config['productDB'])
-project_ids = config['projectId']  # projects that currently support the premium-targeting feature
-project_ids = project_ids if len(project_ids) > 1 else project_ids*2
-
-
-def send_request(args):
-    try:
-        request_data = json.dumps({"account_id": args[0], "project_id": args[1]})
-        request = requests.post(target_combine_url, request_data)
-        result = json.loads(request.text)
-        logger.info("account_id= %s,  project_id=%s, result=%s" % (args[0], args[1], result))
-        return result
-    except Exception:
-        logger.error("account_id= %s,  project_id=%s, traceback=%s" % (args[0], args[1], traceback.format_exc()))
-        return "error"
 
+project_ids = config['projectId']  # projects that currently support the premium-targeting feature
+project_ids = project_ids if len(project_ids) > 1 else project_ids * 2
 
 # 0. Log when this scheduled task is invoked
 unique_uuid = str(uuid.uuid4())
-logger.info("***********优质定向创建-Start, unique_uuid = %s ,  time = %s **************" % (unique_uuid, datetime.datetime.now()))
+logger.info(f"*******优质定向创建-Start, unique_uuid = {unique_uuid} ,  {datetime.datetime.now()} ********")
 # 1. Get the account_ids (and their project_ids) that have premium targeting configured
 # system_optimization: whether system optimization is enabled, 0 = no, 1 = yes
-sql = """
-select t1.account_id, t2.project_id
-from
-(select account_id from ctop_ai_kuaishou_advertiser_strategy where status = 1
-  and system_optimization = 1
- ) t1
- left join 
- (select project_id,account_id from ctop_user_allocation where project_id in %s) t2
- on t1.account_id = t2.account_id
-""" % (tuple(project_ids),)
+sql = f"select t1.account_id, t2.project_id " \
+      f"from (select account_id from ctop_ai_kuaishou_advertiser_strategy where status = 1 and system_optimization = 1) t1 " \
+      f"left join (select project_id, account_id from ctop_user_allocation where project_id in {tuple(project_ids)} and account_status = 0) t2 " \
+      f"on t1.account_id = t2.account_id " \
+      f"where t2.account_id is not null"
 
-account_df = pd.read_sql(sql, product_db_engine)
+account_df = pd.read_sql(sql, jeecg_product_db)
 account_df.drop_duplicates('account_id', keep='first', inplace=True)
 acc_list = [(int(item['account_id']), int(item['project_id'])) for index, item in account_df.iterrows()]
 
 # TODO: test-only overrides below, comment them out before release
 # acc_list = [(10456827, 458), (9774238, 458)]
 # acc_list = [(10456827, 458), (9774238, 458)]
-# acc_list = [(10456827, 458)]
-# acc_list = [(9774099, 458)]
-# acc_list = [(9774238, 458)]
-# acc_list = [(9767003, 458)]
+# acc_list = [(11041466, 458)]  # Li Lin's account
+acc_list = [(10967859, 458)]  # Guo Hao's account (test override, see TODO above)
+
+def task(args):
+    account_id = args[0]
+    project_id = args[1]
+    ai_target_combine_create(account_id, project_id)
 
 
 # 2. Process accounts in batches of 5, one thread pool per batch
 batch_num = 5
 for i in range(0, len(acc_list), batch_num):
     if i + batch_num < len(acc_list):
-        logger.info("time is %s, batch task is %s" % (datetime.datetime.now(), acc_list[i:i+batch_num]))
+        logger.info(f"{datetime.datetime.now()}, batch task is {acc_list[i:i + batch_num]}")
         with ThreadPoolExecutor(max_workers=batch_num) as pool:
-            results = pool.map(send_request, tuple(acc_list[i: i+batch_num]))
+            results = pool.map(task, tuple(acc_list[i: i + batch_num]))
     else:
+        logger.info(f"{datetime.datetime.now()}, batch task is {acc_list[i: len(acc_list)]}")
         with ThreadPoolExecutor(max_workers=batch_num) as pool:
-            logger.info("time is %s, batch task is %s" % (datetime.datetime.now(), acc_list[i: len(acc_list)]))
-            results = pool.map(send_request, tuple(acc_list[i: len(acc_list)]))
-
+            results = pool.map(task, tuple(acc_list[i: len(acc_list)]))
 
 # 3. Log when this scheduled task finishes
-logger.info("***********优质定向创建-End, unique_uuid = %s , time = %s **************" % (unique_uuid, datetime.datetime.now()))
-
-
-
-
-
-
-
-
-
-
+logger.info(f"******优质定向创建-End, unique_uuid = {unique_uuid} , {datetime.datetime.now()} ******")

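The batch loop above repeats the logging and pool setup in both branches of the if/else. An equivalent, slightly tighter form is sketched below, keeping the 5-account batch size and one thread pool per batch; the print calls stand in for the loguru logger and ai_target_combine_create.

from concurrent.futures import ThreadPoolExecutor


def task(args):
    account_id, project_id = args
    print(f"processing account_id={account_id}, project_id={project_id}")  # stand-in for ai_target_combine_create


acc_list = [(10456827, 458), (9774238, 458), (9774099, 458), (11041466, 458), (10967859, 458), (9767003, 458)]
batch_num = 5
for i in range(0, len(acc_list), batch_num):
    batch = acc_list[i:i + batch_num]  # slicing already clamps at the end of the list, so no if/else is needed
    print(f"batch task is {batch}")
    with ThreadPoolExecutor(max_workers=batch_num) as pool:
        results = list(pool.map(task, batch))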
+ 2 - 2
utils/commonFunc.py

@@ -48,7 +48,7 @@ def is_contains_region(val1, val2):
     with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:
         config = yaml.load(f.read(), Loader=yaml.FullLoader)
 
-    city_df = pd.read_sql(sql, get_db_engine(config['productDB']))
+    city_df = pd.read_sql(sql, get_db_engine(config['jeecg_product_db']))
 
     # If '吉林' (Jilin) is in city_level, ctop_kuaishou_region_list_parent returns both Jilin province and Jilin city
     # Sort by level descending, then deduplicate by city_name, keeping the lower-level value
@@ -187,7 +187,7 @@ def city_code_transform(val):
 
         with open('/data/pythonProject/ai_target/config/config.yaml', mode='r', encoding='utf-8') as f:
             config = yaml.load(f.read(), Loader=yaml.FullLoader)
-        city_df = pd.read_sql(sql, get_db_engine(config['productDB']))
+        city_df = pd.read_sql(sql, get_db_engine(config['jeecg_product_db']))
 
         # If '吉林' (Jilin) is in city_level, ctop_kuaishou_region_list_parent queries out both Jilin province and Jilin city
         # Sort by level descending, then deduplicate by city_name, keeping the higher-level value
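Both comments describe the same pandas trick with opposite keep directions. A small self-contained sketch of the mechanics (city names and level values are illustrative): after sorting by level in descending order, drop_duplicates(keep='first') retains the row with the larger level value and keep='last' retains the smaller one.

import pandas as pd

city_df = pd.DataFrame({
    "city_name": ["吉林", "吉林", "长春"],
    "level": [1, 2, 2],  # illustrative level codes for province- vs city-level rows
})

city_df = city_df.sort_values(by="level", ascending=False)
keep_larger_level = city_df.drop_duplicates("city_name", keep="first")
keep_smaller_level = city_df.drop_duplicates("city_name", keep="last")
print(keep_larger_level)
print(keep_smaller_level)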